commit:     9a7c837a4ab894a1b92621bb10fccf68c2164b54
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Thu Oct  5 11:38:18 2017 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Thu Oct  5 11:38:18 2017 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=9a7c837a

Linux patch 4.9.53

 0000_README             |    4 +
 1052_linux-4.9.53.patch | 2332 +++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 2336 insertions(+)

diff --git a/0000_README b/0000_README
index 2ae097d..8c1812a 100644
--- a/0000_README
+++ b/0000_README
@@ -251,6 +251,10 @@ Patch:  1051_linux-4.9.52.patch
 From:   http://www.kernel.org
 Desc:   Linux 4.9.52
 
+Patch:  1052_linux-4.9.53.patch
+From:   http://www.kernel.org
+Desc:   Linux 4.9.53
+
 Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.

diff --git a/1052_linux-4.9.53.patch b/1052_linux-4.9.53.patch
new file mode 100644
index 0000000..d7f7da2
--- /dev/null
+++ b/1052_linux-4.9.53.patch
@@ -0,0 +1,2332 @@
+diff --git a/Makefile b/Makefile
+index c53de1e38c6a..98e3be659b21 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,6 +1,6 @@
+ VERSION = 4
+ PATCHLEVEL = 9
+-SUBLEVEL = 52
++SUBLEVEL = 53
+ EXTRAVERSION =
+ NAME = Roaring Lionus
+ 
+diff --git a/arch/arm/xen/mm.c b/arch/arm/xen/mm.c
+index d062f08f5020..4b24964a520a 100644
+--- a/arch/arm/xen/mm.c
++++ b/arch/arm/xen/mm.c
+@@ -199,6 +199,7 @@ static struct dma_map_ops xen_swiotlb_dma_ops = {
+       .unmap_page = xen_swiotlb_unmap_page,
+       .dma_supported = xen_swiotlb_dma_supported,
+       .set_dma_mask = xen_swiotlb_set_dma_mask,
++      .mmap = xen_swiotlb_dma_mmap,
+ };
+ 
+ int __init xen_mm_init(void)
+diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S
+index 332e33193ccf..539bebc1222f 100644
+--- a/arch/arm64/kernel/head.S
++++ b/arch/arm64/kernel/head.S
+@@ -486,6 +486,7 @@ ENTRY(kimage_vaddr)
+  * booted in EL1 or EL2 respectively.
+  */
+ ENTRY(el2_setup)
++      msr     SPsel, #1                       // We want to use SP_EL{1,2}
+       mrs     x0, CurrentEL
+       cmp     x0, #CurrentEL_EL2
+       b.ne    1f
+diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c
+index fec5b1ce97f8..403fe9e57135 100644
+--- a/arch/arm64/mm/fault.c
++++ b/arch/arm64/mm/fault.c
+@@ -509,7 +509,7 @@ static const struct fault_info fault_info[] = {
+       { do_translation_fault, SIGSEGV, SEGV_MAPERR,   "level 0 translation fault"     },
+       { do_translation_fault, SIGSEGV, SEGV_MAPERR,   "level 1 translation fault"     },
+       { do_translation_fault, SIGSEGV, SEGV_MAPERR,   "level 2 translation fault"     },
+-      { do_page_fault,        SIGSEGV, SEGV_MAPERR,   "level 3 translation fault"     },
++      { do_translation_fault, SIGSEGV, SEGV_MAPERR,   "level 3 translation fault"     },
+       { do_bad,               SIGBUS,  0,             "unknown 8"                     },
+       { do_page_fault,        SIGSEGV, SEGV_ACCERR,   "level 1 access flag fault"     },
+       { do_page_fault,        SIGSEGV, SEGV_ACCERR,   "level 2 access flag fault"     },
+diff --git a/arch/powerpc/kernel/entry_64.S b/arch/powerpc/kernel/entry_64.S
+index 767ef6d68c9e..caa659671599 100644
+--- a/arch/powerpc/kernel/entry_64.S
++++ b/arch/powerpc/kernel/entry_64.S
+@@ -1235,10 +1235,14 @@ _GLOBAL(ftrace_caller)
+       stdu    r1,-SWITCH_FRAME_SIZE(r1)
+ 
+       /* Save all gprs to pt_regs */
+-      SAVE_8GPRS(0,r1)
+-      SAVE_8GPRS(8,r1)
+-      SAVE_8GPRS(16,r1)
+-      SAVE_8GPRS(24,r1)
++      SAVE_GPR(0, r1)
++      SAVE_10GPRS(2, r1)
++      SAVE_10GPRS(12, r1)
++      SAVE_10GPRS(22, r1)
++
++      /* Save previous stack pointer (r1) */
++      addi    r8, r1, SWITCH_FRAME_SIZE
++      std     r8, GPR1(r1)
+ 
+       /* Load special regs for save below */
+       mfmsr   r8
+@@ -1292,10 +1296,10 @@ ftrace_call:
+ #endif
+ 
+       /* Restore gprs */
+-      REST_8GPRS(0,r1)
+-      REST_8GPRS(8,r1)
+-      REST_8GPRS(16,r1)
+-      REST_8GPRS(24,r1)
++      REST_GPR(0,r1)
++      REST_10GPRS(2,r1)
++      REST_10GPRS(12,r1)
++      REST_10GPRS(22,r1)
+ 
+       /* Restore callee's TOC */
+       ld      r2, 24(r1)
+diff --git a/arch/powerpc/kernel/ptrace.c b/arch/powerpc/kernel/ptrace.c
+index dcbb9144c16d..d97370866a5f 100644
+--- a/arch/powerpc/kernel/ptrace.c
++++ b/arch/powerpc/kernel/ptrace.c
+@@ -131,7 +131,7 @@ static void flush_tmregs_to_thread(struct task_struct *tsk)
+        * in the appropriate thread structures from live.
+        */
+ 
+-      if (tsk != current)
++      if ((!cpu_has_feature(CPU_FTR_TM)) || (tsk != current))
+               return;
+ 
+       if (MSR_TM_SUSPENDED(mfmsr())) {
+diff --git a/arch/powerpc/kvm/book3s_64_vio.c b/arch/powerpc/kvm/book3s_64_vio.c
+index c379ff5a4438..da2a7eccb10a 100644
+--- a/arch/powerpc/kvm/book3s_64_vio.c
++++ b/arch/powerpc/kvm/book3s_64_vio.c
+@@ -129,8 +129,11 @@ static int kvm_spapr_tce_mmap(struct file *file, struct vm_area_struct *vma)
+ static int kvm_spapr_tce_release(struct inode *inode, struct file *filp)
+ {
+       struct kvmppc_spapr_tce_table *stt = filp->private_data;
++      struct kvm *kvm = stt->kvm;
+ 
++      mutex_lock(&kvm->lock);
+       list_del_rcu(&stt->list);
++      mutex_unlock(&kvm->lock);
+ 
+       kvm_put_kvm(stt->kvm);
+ 
+@@ -150,6 +153,7 @@ long kvm_vm_ioctl_create_spapr_tce(struct kvm *kvm,
+                                  struct kvm_create_spapr_tce_64 *args)
+ {
+       struct kvmppc_spapr_tce_table *stt = NULL;
++      struct kvmppc_spapr_tce_table *siter;
+       unsigned long npages, size;
+       int ret = -ENOMEM;
+       int i;
+@@ -157,24 +161,16 @@ long kvm_vm_ioctl_create_spapr_tce(struct kvm *kvm,
+       if (!args->size)
+               return -EINVAL;
+ 
+-      /* Check this LIOBN hasn't been previously allocated */
+-      list_for_each_entry(stt, &kvm->arch.spapr_tce_tables, list) {
+-              if (stt->liobn == args->liobn)
+-                      return -EBUSY;
+-      }
+-
+       size = args->size;
+       npages = kvmppc_tce_pages(size);
+       ret = kvmppc_account_memlimit(kvmppc_stt_pages(npages), true);
+-      if (ret) {
+-              stt = NULL;
+-              goto fail;
+-      }
++      if (ret)
++              return ret;
+ 
+       stt = kzalloc(sizeof(*stt) + npages * sizeof(struct page *),
+                     GFP_KERNEL);
+       if (!stt)
+-              goto fail;
++              goto fail_acct;
+ 
+       stt->liobn = args->liobn;
+       stt->page_shift = args->page_shift;
+@@ -188,24 +184,39 @@ long kvm_vm_ioctl_create_spapr_tce(struct kvm *kvm,
+                       goto fail;
+       }
+ 
+-      kvm_get_kvm(kvm);
+-
+       mutex_lock(&kvm->lock);
+-      list_add_rcu(&stt->list, &kvm->arch.spapr_tce_tables);
++
++      /* Check this LIOBN hasn't been previously allocated */
++      ret = 0;
++      list_for_each_entry(siter, &kvm->arch.spapr_tce_tables, list) {
++              if (siter->liobn == args->liobn) {
++                      ret = -EBUSY;
++                      break;
++              }
++      }
++
++      if (!ret)
++              ret = anon_inode_getfd("kvm-spapr-tce", &kvm_spapr_tce_fops,
++                                     stt, O_RDWR | O_CLOEXEC);
++
++      if (ret >= 0) {
++              list_add_rcu(&stt->list, &kvm->arch.spapr_tce_tables);
++              kvm_get_kvm(kvm);
++      }
+ 
+       mutex_unlock(&kvm->lock);
+ 
+-      return anon_inode_getfd("kvm-spapr-tce", &kvm_spapr_tce_fops,
+-                              stt, O_RDWR | O_CLOEXEC);
++      if (ret >= 0)
++              return ret;
+ 
+-fail:
+-      if (stt) {
+-              for (i = 0; i < npages; i++)
+-                      if (stt->pages[i])
+-                              __free_page(stt->pages[i]);
++ fail:
++      for (i = 0; i < npages; i++)
++              if (stt->pages[i])
++                      __free_page(stt->pages[i]);
+ 
+-              kfree(stt);
+-      }
++      kfree(stt);
++ fail_acct:
++      kvmppc_account_memlimit(kvmppc_stt_pages(npages), false);
+       return ret;
+ }
+ 
+diff --git a/arch/powerpc/platforms/pseries/mobility.c b/arch/powerpc/platforms/pseries/mobility.c
+index a560a98bcf3b..6a5e7467445c 100644
+--- a/arch/powerpc/platforms/pseries/mobility.c
++++ b/arch/powerpc/platforms/pseries/mobility.c
+@@ -225,8 +225,10 @@ static int add_dt_node(__be32 parent_phandle, __be32 drc_index)
+               return -ENOENT;
+ 
+       dn = dlpar_configure_connector(drc_index, parent_dn);
+-      if (!dn)
++      if (!dn) {
++              of_node_put(parent_dn);
+               return -ENOENT;
++      }
+ 
+       rc = dlpar_attach_node(dn);
+       if (rc)
+diff --git a/arch/s390/mm/gup.c b/arch/s390/mm/gup.c
+index 18d4107e10ee..97fc449a7470 100644
+--- a/arch/s390/mm/gup.c
++++ b/arch/s390/mm/gup.c
+@@ -56,13 +56,12 @@ static inline int gup_pte_range(pmd_t *pmdp, pmd_t pmd, unsigned long addr,
+ static inline int gup_huge_pmd(pmd_t *pmdp, pmd_t pmd, unsigned long addr,
+               unsigned long end, int write, struct page **pages, int *nr)
+ {
+-      unsigned long mask, result;
+       struct page *head, *page;
++      unsigned long mask;
+       int refs;
+ 
+-      result = write ? 0 : _SEGMENT_ENTRY_PROTECT;
+-      mask = result | _SEGMENT_ENTRY_INVALID;
+-      if ((pmd_val(pmd) & mask) != result)
++      mask = (write ? _SEGMENT_ENTRY_PROTECT : 0) | _SEGMENT_ENTRY_INVALID;
++      if ((pmd_val(pmd) & mask) != 0)
+               return 0;
+       VM_BUG_ON(!pfn_valid(pmd_val(pmd) >> PAGE_SHIFT));
+ 
+diff --git a/arch/x86/kernel/fpu/regset.c b/arch/x86/kernel/fpu/regset.c
+index c114b132d121..7052d9a65fe9 100644
+--- a/arch/x86/kernel/fpu/regset.c
++++ b/arch/x86/kernel/fpu/regset.c
+@@ -130,11 +130,16 @@ int xstateregs_set(struct task_struct *target, const struct user_regset *regset,
+ 
+       fpu__activate_fpstate_write(fpu);
+ 
+-      if (boot_cpu_has(X86_FEATURE_XSAVES))
++      if (boot_cpu_has(X86_FEATURE_XSAVES)) {
+               ret = copyin_to_xsaves(kbuf, ubuf, xsave);
+-      else
++      } else {
+               ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, xsave, 0, -1);
+ 
++              /* xcomp_bv must be 0 when using uncompacted format */
++              if (!ret && xsave->header.xcomp_bv)
++                      ret = -EINVAL;
++      }
++
+       /*
+        * In case of failure, mark all states as init:
+        */
+diff --git a/arch/x86/kernel/fpu/signal.c b/arch/x86/kernel/fpu/signal.c
+index a184c210efba..3ec0d2d64601 100644
+--- a/arch/x86/kernel/fpu/signal.c
++++ b/arch/x86/kernel/fpu/signal.c
+@@ -329,6 +329,10 @@ static int __fpu__restore_sig(void __user *buf, void __user *buf_fx, int size)
+               } else {
+                       err = __copy_from_user(&fpu->state.xsave,
+                                              buf_fx, state_size);
++
++                      /* xcomp_bv must be 0 when using uncompacted format */
++                      if (!err && state_size > offsetof(struct xregs_state, header) && fpu->state.xsave.header.xcomp_bv)
++                              err = -EINVAL;
+               }
+ 
+               if (err || __copy_from_user(&env, buf, sizeof(env))) {
+diff --git a/arch/x86/kernel/kvm.c b/arch/x86/kernel/kvm.c
+index 55ffd9dc2258..77f17cbfe271 100644
+--- a/arch/x86/kernel/kvm.c
++++ b/arch/x86/kernel/kvm.c
+@@ -141,7 +141,8 @@ void kvm_async_pf_task_wait(u32 token)
+ 
+       n.token = token;
+       n.cpu = smp_processor_id();
+-      n.halted = is_idle_task(current) || preempt_count() > 1;
++      n.halted = is_idle_task(current) || preempt_count() > 1 ||
++                 rcu_preempt_depth();
+       init_swait_queue_head(&n.wq);
+       hlist_add_head(&n.link, &b->list);
+       raw_spin_unlock(&b->lock);
+diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
+index 3dc6d8017ce9..fb49212d25df 100644
+--- a/arch/x86/kvm/vmx.c
++++ b/arch/x86/kvm/vmx.c
+@@ -2167,46 +2167,44 @@ static void vmx_vcpu_pi_load(struct kvm_vcpu *vcpu, int cpu)
+       struct pi_desc old, new;
+       unsigned int dest;
+ 
+-      if (!kvm_arch_has_assigned_device(vcpu->kvm) ||
+-              !irq_remapping_cap(IRQ_POSTING_CAP)  ||
+-              !kvm_vcpu_apicv_active(vcpu))
++      /*
++       * In case of hot-plug or hot-unplug, we may have to undo
++       * vmx_vcpu_pi_put even if there is no assigned device.  And we
++       * always keep PI.NDST up to date for simplicity: it makes the
++       * code easier, and CPU migration is not a fast path.
++       */
++      if (!pi_test_sn(pi_desc) && vcpu->cpu == cpu)
++              return;
++
++      /*
++       * First handle the simple case where no cmpxchg is necessary; just
++       * allow posting non-urgent interrupts.
++       *
++       * If the 'nv' field is POSTED_INTR_WAKEUP_VECTOR, do not change
++       * PI.NDST: pi_post_block will do it for us and the wakeup_handler
++       * expects the VCPU to be on the blocked_vcpu_list that matches
++       * PI.NDST.
++       */
++      if (pi_desc->nv == POSTED_INTR_WAKEUP_VECTOR ||
++          vcpu->cpu == cpu) {
++              pi_clear_sn(pi_desc);
+               return;
++      }
+ 
++      /* The full case.  */
+       do {
+               old.control = new.control = pi_desc->control;
+ 
+-              /*
+-               * If 'nv' field is POSTED_INTR_WAKEUP_VECTOR, there
+-               * are two possible cases:
+-               * 1. After running 'pre_block', context switch
+-               *    happened. For this case, 'sn' was set in
+-               *    vmx_vcpu_put(), so we need to clear it here.
+-               * 2. After running 'pre_block', we were blocked,
+-               *    and woken up by some other guy. For this case,
+-               *    we don't need to do anything, 'pi_post_block'
+-               *    will do everything for us. However, we cannot
+-               *    check whether it is case #1 or case #2 here
+-               *    (maybe, not needed), so we also clear sn here,
+-               *    I think it is not a big deal.
+-               */
+-              if (pi_desc->nv != POSTED_INTR_WAKEUP_VECTOR) {
+-                      if (vcpu->cpu != cpu) {
+-                              dest = cpu_physical_id(cpu);
+-
+-                              if (x2apic_enabled())
+-                                      new.ndst = dest;
+-                              else
+-                                      new.ndst = (dest << 8) & 0xFF00;
+-                      }
++              dest = cpu_physical_id(cpu);
+ 
+-                      /* set 'NV' to 'notification vector' */
+-                      new.nv = POSTED_INTR_VECTOR;
+-              }
++              if (x2apic_enabled())
++                      new.ndst = dest;
++              else
++                      new.ndst = (dest << 8) & 0xFF00;
+ 
+-              /* Allow posting non-urgent interrupts */
+               new.sn = 0;
+-      } while (cmpxchg(&pi_desc->control, old.control,
+-                      new.control) != old.control);
++      } while (cmpxchg64(&pi_desc->control, old.control,
++                         new.control) != old.control);
+ }
+ 
+ static void decache_tsc_multiplier(struct vcpu_vmx *vmx)
+@@ -4761,21 +4759,30 @@ static inline bool kvm_vcpu_trigger_posted_interrupt(struct kvm_vcpu *vcpu)
+ {
+ #ifdef CONFIG_SMP
+       if (vcpu->mode == IN_GUEST_MODE) {
+-              struct vcpu_vmx *vmx = to_vmx(vcpu);
+-
+               /*
+-               * Currently, we don't support urgent interrupt,
+-               * all interrupts are recognized as non-urgent
+-               * interrupt, so we cannot post interrupts when
+-               * 'SN' is set.
++               * The vector of interrupt to be delivered to vcpu had
++               * been set in PIR before this function.
+                *
+-               * If the vcpu is in guest mode, it means it is
+-               * running instead of being scheduled out and
+-               * waiting in the run queue, and that's the only
+-               * case when 'SN' is set currently, warning if
+-               * 'SN' is set.
++               * Following cases will be reached in this block, and
++               * we always send a notification event in all cases as
++               * explained below.
++               *
++               * Case 1: vcpu keeps in non-root mode. Sending a
++               * notification event posts the interrupt to vcpu.
++               *
++               * Case 2: vcpu exits to root mode and is still
++               * runnable. PIR will be synced to vIRR before the
++               * next vcpu entry. Sending a notification event in
++               * this case has no effect, as vcpu is not in root
++               * mode.
++               *
++               * Case 3: vcpu exits to root mode and is blocked.
++               * vcpu_block() has already synced PIR to vIRR and
++               * never blocks vcpu if vIRR is not cleared. Therefore,
++               * a blocked vcpu here does not wait for any requested
++               * interrupts in PIR, and sending a notification event
++               * which has no effect is safe here.
+                */
+-              WARN_ON_ONCE(pi_test_sn(&vmx->pi_desc));
+ 
+               apic->send_IPI_mask(get_cpu_mask(vcpu->cpu),
+                               POSTED_INTR_VECTOR);
+@@ -9187,6 +9194,13 @@ static struct kvm_vcpu *vmx_create_vcpu(struct kvm *kvm, unsigned int id)
+ 
+       vmx->msr_ia32_feature_control_valid_bits = FEATURE_CONTROL_LOCKED;
+ 
++      /*
++       * Enforce invariant: pi_desc.nv is always either POSTED_INTR_VECTOR
++       * or POSTED_INTR_WAKEUP_VECTOR.
++       */
++      vmx->pi_desc.nv = POSTED_INTR_VECTOR;
++      vmx->pi_desc.sn = 1;
++
+       return &vmx->vcpu;
+ 
+ free_vmcs:
+@@ -9996,6 +10010,11 @@ static void prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
+               vmcs_write64(VIRTUAL_APIC_PAGE_ADDR,
+                               page_to_phys(vmx->nested.virtual_apic_page));
+               vmcs_write32(TPR_THRESHOLD, vmcs12->tpr_threshold);
++      } else {
++#ifdef CONFIG_X86_64
++              exec_control |= CPU_BASED_CR8_LOAD_EXITING |
++                              CPU_BASED_CR8_STORE_EXITING;
++#endif
+       }
+ 
+       if (cpu_has_vmx_msr_bitmap() &&
+@@ -11000,6 +11019,37 @@ static void vmx_enable_log_dirty_pt_masked(struct kvm *kvm,
+       kvm_mmu_clear_dirty_pt_masked(kvm, memslot, offset, mask);
+ }
+ 
++static void __pi_post_block(struct kvm_vcpu *vcpu)
++{
++      struct pi_desc *pi_desc = vcpu_to_pi_desc(vcpu);
++      struct pi_desc old, new;
++      unsigned int dest;
++
++      do {
++              old.control = new.control = pi_desc->control;
++              WARN(old.nv != POSTED_INTR_WAKEUP_VECTOR,
++                   "Wakeup handler not enabled while the VCPU is blocked\n");
++
++              dest = cpu_physical_id(vcpu->cpu);
++
++              if (x2apic_enabled())
++                      new.ndst = dest;
++              else
++                      new.ndst = (dest << 8) & 0xFF00;
++
++              /* set 'NV' to 'notification vector' */
++              new.nv = POSTED_INTR_VECTOR;
++      } while (cmpxchg64(&pi_desc->control, old.control,
++                         new.control) != old.control);
++
++      if (!WARN_ON_ONCE(vcpu->pre_pcpu == -1)) {
++              spin_lock(&per_cpu(blocked_vcpu_on_cpu_lock, vcpu->pre_pcpu));
++              list_del(&vcpu->blocked_vcpu_list);
++              spin_unlock(&per_cpu(blocked_vcpu_on_cpu_lock, vcpu->pre_pcpu));
++              vcpu->pre_pcpu = -1;
++      }
++}
++
+ /*
+  * This routine does the following things for vCPU which is going
+  * to be blocked if VT-d PI is enabled.
+@@ -11015,7 +11065,6 @@ static void vmx_enable_log_dirty_pt_masked(struct kvm *kvm,
+  */
+ static int pi_pre_block(struct kvm_vcpu *vcpu)
+ {
+-      unsigned long flags;
+       unsigned int dest;
+       struct pi_desc old, new;
+       struct pi_desc *pi_desc = vcpu_to_pi_desc(vcpu);
+@@ -11025,34 +11074,20 @@ static int pi_pre_block(struct kvm_vcpu *vcpu)
+               !kvm_vcpu_apicv_active(vcpu))
+               return 0;
+ 
+-      vcpu->pre_pcpu = vcpu->cpu;
+-      spin_lock_irqsave(&per_cpu(blocked_vcpu_on_cpu_lock,
+-                        vcpu->pre_pcpu), flags);
+-      list_add_tail(&vcpu->blocked_vcpu_list,
+-                    &per_cpu(blocked_vcpu_on_cpu,
+-                    vcpu->pre_pcpu));
+-      spin_unlock_irqrestore(&per_cpu(blocked_vcpu_on_cpu_lock,
+-                             vcpu->pre_pcpu), flags);
++      WARN_ON(irqs_disabled());
++      local_irq_disable();
++      if (!WARN_ON_ONCE(vcpu->pre_pcpu != -1)) {
++              vcpu->pre_pcpu = vcpu->cpu;
++              spin_lock(&per_cpu(blocked_vcpu_on_cpu_lock, vcpu->pre_pcpu));
++              list_add_tail(&vcpu->blocked_vcpu_list,
++                            &per_cpu(blocked_vcpu_on_cpu,
++                                     vcpu->pre_pcpu));
++              spin_unlock(&per_cpu(blocked_vcpu_on_cpu_lock, vcpu->pre_pcpu));
++      }
+ 
+       do {
+               old.control = new.control = pi_desc->control;
+ 
+-              /*
+-               * We should not block the vCPU if
+-               * an interrupt is posted for it.
+-               */
+-              if (pi_test_on(pi_desc) == 1) {
+-                      spin_lock_irqsave(&per_cpu(blocked_vcpu_on_cpu_lock,
+-                                        vcpu->pre_pcpu), flags);
+-                      list_del(&vcpu->blocked_vcpu_list);
+-                      spin_unlock_irqrestore(
+-                                      &per_cpu(blocked_vcpu_on_cpu_lock,
+-                                      vcpu->pre_pcpu), flags);
+-                      vcpu->pre_pcpu = -1;
+-
+-                      return 1;
+-              }
+-
+               WARN((pi_desc->sn == 1),
+                    "Warning: SN field of posted-interrupts "
+                    "is set before blocking\n");
+@@ -11074,10 +11109,15 @@ static int pi_pre_block(struct kvm_vcpu *vcpu)
+ 
+               /* set 'NV' to 'wakeup vector' */
+               new.nv = POSTED_INTR_WAKEUP_VECTOR;
+-      } while (cmpxchg(&pi_desc->control, old.control,
+-                      new.control) != old.control);
++      } while (cmpxchg64(&pi_desc->control, old.control,
++                         new.control) != old.control);
+ 
+-      return 0;
++      /* We should not block the vCPU if an interrupt is posted for it.  */
++      if (pi_test_on(pi_desc) == 1)
++              __pi_post_block(vcpu);
++
++      local_irq_enable();
++      return (vcpu->pre_pcpu == -1);
+ }
+ 
+ static int vmx_pre_block(struct kvm_vcpu *vcpu)
+@@ -11093,44 +11133,13 @@ static int vmx_pre_block(struct kvm_vcpu *vcpu)
+ 
+ static void pi_post_block(struct kvm_vcpu *vcpu)
+ {
+-      struct pi_desc *pi_desc = vcpu_to_pi_desc(vcpu);
+-      struct pi_desc old, new;
+-      unsigned int dest;
+-      unsigned long flags;
+-
+-      if (!kvm_arch_has_assigned_device(vcpu->kvm) ||
+-              !irq_remapping_cap(IRQ_POSTING_CAP)  ||
+-              !kvm_vcpu_apicv_active(vcpu))
++      if (vcpu->pre_pcpu == -1)
+               return;
+ 
+-      do {
+-              old.control = new.control = pi_desc->control;
+-
+-              dest = cpu_physical_id(vcpu->cpu);
+-
+-              if (x2apic_enabled())
+-                      new.ndst = dest;
+-              else
+-                      new.ndst = (dest << 8) & 0xFF00;
+-
+-              /* Allow posting non-urgent interrupts */
+-              new.sn = 0;
+-
+-              /* set 'NV' to 'notification vector' */
+-              new.nv = POSTED_INTR_VECTOR;
+-      } while (cmpxchg(&pi_desc->control, old.control,
+-                      new.control) != old.control);
+-
+-      if(vcpu->pre_pcpu != -1) {
+-              spin_lock_irqsave(
+-                      &per_cpu(blocked_vcpu_on_cpu_lock,
+-                      vcpu->pre_pcpu), flags);
+-              list_del(&vcpu->blocked_vcpu_list);
+-              spin_unlock_irqrestore(
+-                      &per_cpu(blocked_vcpu_on_cpu_lock,
+-                      vcpu->pre_pcpu), flags);
+-              vcpu->pre_pcpu = -1;
+-      }
++      WARN_ON(irqs_disabled());
++      local_irq_disable();
++      __pi_post_block(vcpu);
++      local_irq_enable();
+ }
+ 
+ static void vmx_post_block(struct kvm_vcpu *vcpu)
+@@ -11158,7 +11167,7 @@ static int vmx_update_pi_irte(struct kvm *kvm, unsigned int host_irq,
+       struct kvm_lapic_irq irq;
+       struct kvm_vcpu *vcpu;
+       struct vcpu_data vcpu_info;
+-      int idx, ret = -EINVAL;
++      int idx, ret = 0;
+ 
+       if (!kvm_arch_has_assigned_device(kvm) ||
+               !irq_remapping_cap(IRQ_POSTING_CAP) ||
+@@ -11167,7 +11176,12 @@ static int vmx_update_pi_irte(struct kvm *kvm, unsigned int host_irq,
+ 
+       idx = srcu_read_lock(&kvm->irq_srcu);
+       irq_rt = srcu_dereference(kvm->irq_routing, &kvm->irq_srcu);
+-      BUG_ON(guest_irq >= irq_rt->nr_rt_entries);
++      if (guest_irq >= irq_rt->nr_rt_entries ||
++          hlist_empty(&irq_rt->map[guest_irq])) {
++              pr_warn_once("no route for guest_irq %u/%u (broken user space?)\n",
++                           guest_irq, irq_rt->nr_rt_entries);
++              goto out;
++      }
+ 
+       hlist_for_each_entry(e, &irq_rt->map[guest_irq], link) {
+               if (e->type != KVM_IRQ_ROUTING_MSI)
+@@ -11210,12 +11224,8 @@ static int vmx_update_pi_irte(struct kvm *kvm, unsigned int host_irq,
+ 
+               if (set)
+                       ret = irq_set_vcpu_affinity(host_irq, &vcpu_info);
+-              else {
+-                      /* suppress notification event before unposting */
+-                      pi_set_sn(vcpu_to_pi_desc(vcpu));
++              else
+                       ret = irq_set_vcpu_affinity(host_irq, NULL);
+-                      pi_clear_sn(vcpu_to_pi_desc(vcpu));
+-              }
+ 
+               if (ret < 0) {
+                       printk(KERN_INFO "%s: failed to update PI IRTE\n",
+diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
+index 9f72ca3b2669..1dd796025472 100644
+--- a/arch/x86/mm/fault.c
++++ b/arch/x86/mm/fault.c
+@@ -191,8 +191,7 @@ is_prefetch(struct pt_regs *regs, unsigned long error_code, unsigned long addr)
+  * 6. T1   : reaches here, sees vma_pkey(vma)=5, when we really
+  *         faulted on a pte with its pkey=4.
+  */
+-static void fill_sig_info_pkey(int si_code, siginfo_t *info,
+-              struct vm_area_struct *vma)
++static void fill_sig_info_pkey(int si_code, siginfo_t *info, u32 *pkey)
+ {
+       /* This is effectively an #ifdef */
+       if (!boot_cpu_has(X86_FEATURE_OSPKE))
+@@ -208,7 +207,7 @@ static void fill_sig_info_pkey(int si_code, siginfo_t *info,
+        * valid VMA, so we should never reach this without a
+        * valid VMA.
+        */
+-      if (!vma) {
++      if (!pkey) {
+               WARN_ONCE(1, "PKU fault with no VMA passed in");
+               info->si_pkey = 0;
+               return;
+@@ -218,13 +217,12 @@ static void fill_sig_info_pkey(int si_code, siginfo_t *info,
+        * absolutely guranteed to be 100% accurate because of
+        * the race explained above.
+        */
+-      info->si_pkey = vma_pkey(vma);
++      info->si_pkey = *pkey;
+ }
+ 
+ static void
+ force_sig_info_fault(int si_signo, int si_code, unsigned long address,
+-                   struct task_struct *tsk, struct vm_area_struct *vma,
+-                   int fault)
++                   struct task_struct *tsk, u32 *pkey, int fault)
+ {
+       unsigned lsb = 0;
+       siginfo_t info;
+@@ -239,7 +237,7 @@ force_sig_info_fault(int si_signo, int si_code, unsigned long address,
+               lsb = PAGE_SHIFT;
+       info.si_addr_lsb = lsb;
+ 
+-      fill_sig_info_pkey(si_code, &info, vma);
++      fill_sig_info_pkey(si_code, &info, pkey);
+ 
+       force_sig_info(si_signo, &info, tsk);
+ }
+@@ -718,8 +716,6 @@ no_context(struct pt_regs *regs, unsigned long error_code,
+       struct task_struct *tsk = current;
+       unsigned long flags;
+       int sig;
+-      /* No context means no VMA to pass down */
+-      struct vm_area_struct *vma = NULL;
+ 
+       /* Are we prepared to handle this kernel fault? */
+       if (fixup_exception(regs, X86_TRAP_PF)) {
+@@ -744,7 +740,7 @@ no_context(struct pt_regs *regs, unsigned long error_code,
+ 
+                       /* XXX: hwpoison faults will set the wrong code. */
+                       force_sig_info_fault(signal, si_code, address,
+-                                           tsk, vma, 0);
++                                           tsk, NULL, 0);
+               }
+ 
+               /*
+@@ -853,8 +849,7 @@ show_signal_msg(struct pt_regs *regs, unsigned long error_code,
+ 
+ static void
+ __bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code,
+-                     unsigned long address, struct vm_area_struct *vma,
+-                     int si_code)
++                     unsigned long address, u32 *pkey, int si_code)
+ {
+       struct task_struct *tsk = current;
+ 
+@@ -902,7 +897,7 @@ __bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code,
+               tsk->thread.error_code  = error_code;
+               tsk->thread.trap_nr     = X86_TRAP_PF;
+ 
+-              force_sig_info_fault(SIGSEGV, si_code, address, tsk, vma, 0);
++              force_sig_info_fault(SIGSEGV, si_code, address, tsk, pkey, 0);
+ 
+               return;
+       }
+@@ -915,9 +910,9 @@ __bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code,
+ 
+ static noinline void
+ bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code,
+-                   unsigned long address, struct vm_area_struct *vma)
++                   unsigned long address, u32 *pkey)
+ {
+-      __bad_area_nosemaphore(regs, error_code, address, vma, SEGV_MAPERR);
++      __bad_area_nosemaphore(regs, error_code, address, pkey, SEGV_MAPERR);
+ }
+ 
+ static void
+@@ -925,6 +920,10 @@ __bad_area(struct pt_regs *regs, unsigned long error_code,
+          unsigned long address,  struct vm_area_struct *vma, int si_code)
+ {
+       struct mm_struct *mm = current->mm;
++      u32 pkey;
++
++      if (vma)
++              pkey = vma_pkey(vma);
+ 
+       /*
+        * Something tried to access memory that isn't in our memory map..
+@@ -932,7 +931,8 @@ __bad_area(struct pt_regs *regs, unsigned long error_code,
+        */
+       up_read(&mm->mmap_sem);
+ 
+-      __bad_area_nosemaphore(regs, error_code, address, vma, si_code);
++      __bad_area_nosemaphore(regs, error_code, address,
++                             (vma) ? &pkey : NULL, si_code);
+ }
+ 
+ static noinline void
+@@ -975,7 +975,7 @@ bad_area_access_error(struct pt_regs *regs, unsigned long error_code,
+ 
+ static void
+ do_sigbus(struct pt_regs *regs, unsigned long error_code, unsigned long address,
+-        struct vm_area_struct *vma, unsigned int fault)
++        u32 *pkey, unsigned int fault)
+ {
+       struct task_struct *tsk = current;
+       int code = BUS_ADRERR;
+@@ -1002,13 +1002,12 @@ do_sigbus(struct pt_regs *regs, unsigned long error_code, unsigned long address,
+               code = BUS_MCEERR_AR;
+       }
+ #endif
+-      force_sig_info_fault(SIGBUS, code, address, tsk, vma, fault);
++      force_sig_info_fault(SIGBUS, code, address, tsk, pkey, fault);
+ }
+ 
+ static noinline void
+ mm_fault_error(struct pt_regs *regs, unsigned long error_code,
+-             unsigned long address, struct vm_area_struct *vma,
+-             unsigned int fault)
++             unsigned long address, u32 *pkey, unsigned int fault)
+ {
+       if (fatal_signal_pending(current) && !(error_code & PF_USER)) {
+               no_context(regs, error_code, address, 0, 0);
+@@ -1032,9 +1031,9 @@ mm_fault_error(struct pt_regs *regs, unsigned long error_code,
+       } else {
+               if (fault & (VM_FAULT_SIGBUS|VM_FAULT_HWPOISON|
+                            VM_FAULT_HWPOISON_LARGE))
+-                      do_sigbus(regs, error_code, address, vma, fault);
++                      do_sigbus(regs, error_code, address, pkey, fault);
+               else if (fault & VM_FAULT_SIGSEGV)
+-                      bad_area_nosemaphore(regs, error_code, address, vma);
++                      bad_area_nosemaphore(regs, error_code, address, pkey);
+               else
+                       BUG();
+       }
+@@ -1220,6 +1219,7 @@ __do_page_fault(struct pt_regs *regs, unsigned long error_code,
+       struct mm_struct *mm;
+       int fault, major = 0;
+       unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;
++      u32 pkey;
+ 
+       tsk = current;
+       mm = tsk->mm;
+@@ -1420,9 +1420,10 @@ __do_page_fault(struct pt_regs *regs, unsigned long error_code,
+               return;
+       }
+ 
++      pkey = vma_pkey(vma);
+       up_read(&mm->mmap_sem);
+       if (unlikely(fault & VM_FAULT_ERROR)) {
+-              mm_fault_error(regs, error_code, address, vma, fault);
++              mm_fault_error(regs, error_code, address, &pkey, fault);
+               return;
+       }
+ 
+diff --git a/block/bsg-lib.c b/block/bsg-lib.c
+index 650f427d915b..341b8d858e67 100644
+--- a/block/bsg-lib.c
++++ b/block/bsg-lib.c
+@@ -147,7 +147,6 @@ static int bsg_create_job(struct device *dev, struct request *req)
+ failjob_rls_rqst_payload:
+       kfree(job->request_payload.sg_list);
+ failjob_rls_job:
+-      kfree(job);
+       return -ENOMEM;
+ }
+ 
+diff --git a/crypto/drbg.c b/crypto/drbg.c
+index 8cac3d31a5f8..942ddff68408 100644
+--- a/crypto/drbg.c
++++ b/crypto/drbg.c
+@@ -1133,10 +1133,10 @@ static inline void drbg_dealloc_state(struct drbg_state *drbg)
+ {
+       if (!drbg)
+               return;
+-      kzfree(drbg->V);
+-      drbg->Vbuf = NULL;
+-      kzfree(drbg->C);
+-      drbg->Cbuf = NULL;
++      kzfree(drbg->Vbuf);
++      drbg->V = NULL;
++      kzfree(drbg->Cbuf);
++      drbg->C = NULL;
+       kzfree(drbg->scratchpadbuf);
+       drbg->scratchpadbuf = NULL;
+       drbg->reseed_ctr = 0;
+diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c
+index 2932a5bd892f..dfffba39f723 100644
+--- a/drivers/base/power/main.c
++++ b/drivers/base/power/main.c
+@@ -1757,10 +1757,13 @@ void device_pm_check_callbacks(struct device *dev)
+ {
+       spin_lock_irq(&dev->power.lock);
+       dev->power.no_pm_callbacks =
+-              (!dev->bus || pm_ops_is_empty(dev->bus->pm)) &&
+-              (!dev->class || pm_ops_is_empty(dev->class->pm)) &&
++              (!dev->bus || (pm_ops_is_empty(dev->bus->pm) &&
++               !dev->bus->suspend && !dev->bus->resume)) &&
++              (!dev->class || (pm_ops_is_empty(dev->class->pm) &&
++               !dev->class->suspend && !dev->class->resume)) &&
+               (!dev->type || pm_ops_is_empty(dev->type->pm)) &&
+               (!dev->pm_domain || pm_ops_is_empty(&dev->pm_domain->ops)) &&
+-              (!dev->driver || pm_ops_is_empty(dev->driver->pm));
++              (!dev->driver || (pm_ops_is_empty(dev->driver->pm) &&
++               !dev->driver->suspend && !dev->driver->resume));
+       spin_unlock_irq(&dev->power.lock);
+ }
+diff --git a/drivers/crypto/talitos.c b/drivers/crypto/talitos.c
+index 571de2f284cf..e2d323fa2437 100644
+--- a/drivers/crypto/talitos.c
++++ b/drivers/crypto/talitos.c
+@@ -1756,9 +1756,9 @@ static int common_nonsnoop_hash(struct talitos_edesc *edesc,
+               req_ctx->swinit = 0;
+       } else {
+               desc->ptr[1] = zero_entry;
+-              /* Indicate next op is not the first. */
+-              req_ctx->first = 0;
+       }
++      /* Indicate next op is not the first. */
++      req_ctx->first = 0;
+ 
+       /* HMAC key */
+       if (ctx->keylen)
+@@ -1769,7 +1769,7 @@ static int common_nonsnoop_hash(struct talitos_edesc *edesc,
+ 
+       sg_count = edesc->src_nents ?: 1;
+       if (is_sec1 && sg_count > 1)
+-              sg_copy_to_buffer(areq->src, sg_count, edesc->buf, length);
++              sg_copy_to_buffer(req_ctx->psrc, sg_count, edesc->buf, length);
+       else
+               sg_count = dma_map_sg(dev, req_ctx->psrc, sg_count,
+                                     DMA_TO_DEVICE);
+@@ -3057,7 +3057,8 @@ static struct talitos_crypto_alg *talitos_alg_alloc(struct device *dev,
+               t_alg->algt.alg.hash.final = ahash_final;
+               t_alg->algt.alg.hash.finup = ahash_finup;
+               t_alg->algt.alg.hash.digest = ahash_digest;
+-              t_alg->algt.alg.hash.setkey = ahash_setkey;
++              if (!strncmp(alg->cra_name, "hmac", 4))
++                      t_alg->algt.alg.hash.setkey = ahash_setkey;
+               t_alg->algt.alg.hash.import = ahash_import;
+               t_alg->algt.alg.hash.export = ahash_export;
+ 
+diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem.c b/drivers/gpu/drm/etnaviv/etnaviv_gem.c
+index 0370b842d9cc..82dd57d4843c 100644
+--- a/drivers/gpu/drm/etnaviv/etnaviv_gem.c
++++ b/drivers/gpu/drm/etnaviv/etnaviv_gem.c
+@@ -549,12 +549,15 @@ static const struct etnaviv_gem_ops etnaviv_gem_shmem_ops = {
+ void etnaviv_gem_free_object(struct drm_gem_object *obj)
+ {
+       struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);
++      struct etnaviv_drm_private *priv = obj->dev->dev_private;
+       struct etnaviv_vram_mapping *mapping, *tmp;
+ 
+       /* object should not be active */
+       WARN_ON(is_active(etnaviv_obj));
+ 
++      mutex_lock(&priv->gem_lock);
+       list_del(&etnaviv_obj->gem_node);
++      mutex_unlock(&priv->gem_lock);
+ 
+       list_for_each_entry_safe(mapping, tmp, &etnaviv_obj->vram_list,
+                                obj_node) {
+diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
+index 3b21ca5a6c81..82b01123c386 100644
+--- a/drivers/gpu/drm/radeon/radeon_device.c
++++ b/drivers/gpu/drm/radeon/radeon_device.c
+@@ -1674,7 +1674,7 @@ int radeon_suspend_kms(struct drm_device *dev, bool suspend,
+       radeon_agp_suspend(rdev);
+ 
+       pci_save_state(dev->pdev);
+-      if (freeze && rdev->family >= CHIP_CEDAR) {
++      if (freeze && rdev->family >= CHIP_CEDAR && !(rdev->flags & RADEON_IS_IGP)) {
+               rdev->asic->asic_reset(rdev, true);
+               pci_restore_state(dev->pdev);
+       } else if (suspend) {
+diff --git a/drivers/infiniband/hw/cxgb4/cm.c b/drivers/infiniband/hw/cxgb4/cm.c
+index 9398143d7c5e..6512a555f7f8 100644
+--- a/drivers/infiniband/hw/cxgb4/cm.c
++++ b/drivers/infiniband/hw/cxgb4/cm.c
+@@ -2577,9 +2577,9 @@ static int pass_accept_req(struct c4iw_dev *dev, struct sk_buff *skb)
+       c4iw_put_ep(&child_ep->com);
+ reject:
+       reject_cr(dev, hwtid, skb);
++out:
+       if (parent_ep)
+               c4iw_put_ep(&parent_ep->com);
+-out:
+       return 0;
+ }
+ 
+@@ -3441,7 +3441,7 @@ int c4iw_create_listen(struct iw_cm_id *cm_id, int backlog)
+               cm_id->provider_data = ep;
+               goto out;
+       }
+-
++      remove_handle(ep->com.dev, &ep->com.dev->stid_idr, ep->stid);
+       cxgb4_free_stid(ep->com.dev->rdev.lldi.tids, ep->stid,
+                       ep->com.local_addr.ss_family);
+ fail2:
+diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
+index 549b4afd12e1..7aea0221530c 100644
+--- a/drivers/md/raid5.c
++++ b/drivers/md/raid5.c
+@@ -829,6 +829,14 @@ static void stripe_add_to_batch_list(struct r5conf *conf, struct stripe_head *sh
+                       spin_unlock(&head->batch_head->batch_lock);
+                       goto unlock_out;
+               }
++              /*
++               * We must assign batch_head of this stripe within the
++               * batch_lock, otherwise clear_batch_ready of batch head
++               * stripe could clear BATCH_READY bit of this stripe and
++               * this stripe->batch_head doesn't get assigned, which
++               * could confuse clear_batch_ready for this stripe
++               */
++              sh->batch_head = head->batch_head;
+ 
+               /*
+                * at this point, head's BATCH_READY could be cleared, but we
+@@ -836,8 +844,6 @@ static void stripe_add_to_batch_list(struct r5conf *conf, struct stripe_head *sh
+                */
+               list_add(&sh->batch_list, &head->batch_list);
+               spin_unlock(&head->batch_head->batch_lock);
+-
+-              sh->batch_head = head->batch_head;
+       } else {
+               head->batch_head = head;
+               sh->batch_head = head->batch_head;
+@@ -4277,7 +4283,8 @@ static void break_stripe_batch_list(struct stripe_head *head_sh,
+ 
+               set_mask_bits(&sh->state, ~(STRIPE_EXPAND_SYNC_FLAGS |
+                                           (1 << STRIPE_PREREAD_ACTIVE) |
+-                                          (1 << STRIPE_DEGRADED)),
++                                          (1 << STRIPE_DEGRADED) |
++                                          (1 << STRIPE_ON_UNPLUG_LIST)),
+                             head_sh->state & (1 << STRIPE_INSYNC));
+ 
+               sh->check_state = head_sh->check_state;
+diff --git a/drivers/misc/cxl/api.c b/drivers/misc/cxl/api.c
+index 2e5233b60971..ae856161faa9 100644
+--- a/drivers/misc/cxl/api.c
++++ b/drivers/misc/cxl/api.c
+@@ -244,6 +244,10 @@ int cxl_start_context(struct cxl_context *ctx, u64 wed,
+               ctx->real_mode = false;
+       }
+ 
++      /*
++       * Increment driver use count. Enables global TLBIs for hash
++       * and callbacks to handle the segment table
++       */
+       cxl_ctx_get();
+ 
+       if ((rc = cxl_ops->attach_process(ctx, kernel, wed, 0))) {
+diff --git a/drivers/misc/cxl/file.c b/drivers/misc/cxl/file.c
+index afa211397048..d3e009438991 100644
+--- a/drivers/misc/cxl/file.c
++++ b/drivers/misc/cxl/file.c
+@@ -91,7 +91,6 @@ static int __afu_open(struct inode *inode, struct file *file, bool master)
+ 
+       pr_devel("afu_open pe: %i\n", ctx->pe);
+       file->private_data = ctx;
+-      cxl_ctx_get();
+ 
+       /* indicate success */
+       rc = 0;
+@@ -213,6 +212,12 @@ static long afu_ioctl_start_work(struct cxl_context *ctx,
+       ctx->glpid = get_task_pid(current->group_leader, PIDTYPE_PID);
+ 
+ 
++      /*
++       * Increment driver use count. Enables global TLBIs for hash
++       * and callbacks to handle the segment table
++       */
++      cxl_ctx_get();
++
+       trace_cxl_attach(ctx, work.work_element_descriptor, work.num_interrupts, amr);
+ 
+       if ((rc = cxl_ops->attach_process(ctx, false, work.work_element_descriptor,
+@@ -222,6 +227,7 @@ static long afu_ioctl_start_work(struct cxl_context *ctx,
+               put_pid(ctx->glpid);
+               put_pid(ctx->pid);
+               ctx->glpid = ctx->pid = NULL;
++              cxl_ctx_put();
+               goto out;
+       }
+ 
+diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c
+index 0fd7d7ed07ce..c06932c5ecdb 100644
+--- a/drivers/net/wireless/mac80211_hwsim.c
++++ b/drivers/net/wireless/mac80211_hwsim.c
+@@ -1357,8 +1357,6 @@ static void mac80211_hwsim_tx(struct ieee80211_hw *hw,
+                                      txi->control.rates,
+                                      ARRAY_SIZE(txi->control.rates));
+ 
+-      txi->rate_driver_data[0] = channel;
+-
+       if (skb->len >= 24 + 8 &&
+           ieee80211_is_probe_resp(hdr->frame_control)) {
+               /* fake header transmission time */
+diff --git a/drivers/pci/pci-sysfs.c b/drivers/pci/pci-sysfs.c
+index 1b0786555394..f9f4d1c18eb2 100644
+--- a/drivers/pci/pci-sysfs.c
++++ b/drivers/pci/pci-sysfs.c
+@@ -527,7 +527,7 @@ static ssize_t driver_override_store(struct device *dev,
+                                    const char *buf, size_t count)
+ {
+       struct pci_dev *pdev = to_pci_dev(dev);
+-      char *driver_override, *old = pdev->driver_override, *cp;
++      char *driver_override, *old, *cp;
+ 
+       /* We need to keep extra room for a newline */
+       if (count >= (PAGE_SIZE - 1))
+@@ -541,12 +541,15 @@ static ssize_t driver_override_store(struct device *dev,
+       if (cp)
+               *cp = '\0';
+ 
++      device_lock(dev);
++      old = pdev->driver_override;
+       if (strlen(driver_override)) {
+               pdev->driver_override = driver_override;
+       } else {
+               kfree(driver_override);
+               pdev->driver_override = NULL;
+       }
++      device_unlock(dev);
+ 
+       kfree(old);
+ 
+@@ -557,8 +560,12 @@ static ssize_t driver_override_show(struct device *dev,
+                                   struct device_attribute *attr, char *buf)
+ {
+       struct pci_dev *pdev = to_pci_dev(dev);
++      ssize_t len;
+ 
+-      return snprintf(buf, PAGE_SIZE, "%s\n", pdev->driver_override);
++      device_lock(dev);
++      len = snprintf(buf, PAGE_SIZE, "%s\n", pdev->driver_override);
++      device_unlock(dev);
++      return len;
+ }
+ static DEVICE_ATTR_RW(driver_override);
+ 
+diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c
+index 42bca619f854..c39551b32e94 100644
+--- a/drivers/scsi/scsi_transport_iscsi.c
++++ b/drivers/scsi/scsi_transport_iscsi.c
+@@ -3696,7 +3696,7 @@ iscsi_if_rx(struct sk_buff *skb)
+               uint32_t group;
+ 
+               nlh = nlmsg_hdr(skb);
+-              if (nlh->nlmsg_len < sizeof(*nlh) ||
++              if (nlh->nlmsg_len < sizeof(*nlh) + sizeof(*ev) ||
+                   skb->len < nlh->nlmsg_len) {
+                       break;
+               }
+diff --git a/drivers/video/fbdev/aty/atyfb_base.c b/drivers/video/fbdev/aty/atyfb_base.c
+index 11026e726b68..81367cf0af77 100644
+--- a/drivers/video/fbdev/aty/atyfb_base.c
++++ b/drivers/video/fbdev/aty/atyfb_base.c
+@@ -1861,7 +1861,7 @@ static int atyfb_ioctl(struct fb_info *info, u_int cmd, u_long arg)
+ #if defined(DEBUG) && defined(CONFIG_FB_ATY_CT)
+       case ATYIO_CLKR:
+               if (M64_HAS(INTEGRATED)) {
+-                      struct atyclk clk;
++                      struct atyclk clk = { 0 };
+                       union aty_pll *pll = &par->pll;
+                       u32 dsp_config = pll->ct.dsp_config;
+                       u32 dsp_on_off = pll->ct.dsp_on_off;
+diff --git a/drivers/xen/swiotlb-xen.c b/drivers/xen/swiotlb-xen.c
+index 679f79f68182..b68ced5a6331 100644
+--- a/drivers/xen/swiotlb-xen.c
++++ b/drivers/xen/swiotlb-xen.c
+@@ -680,3 +680,22 @@ xen_swiotlb_set_dma_mask(struct device *dev, u64 dma_mask)
+       return 0;
+ }
+ EXPORT_SYMBOL_GPL(xen_swiotlb_set_dma_mask);
++
++/*
++ * Create userspace mapping for the DMA-coherent memory.
++ * This function should be called with the pages from the current domain only,
++ * passing pages mapped from other domains would lead to memory corruption.
++ */
++int
++xen_swiotlb_dma_mmap(struct device *dev, struct vm_area_struct *vma,
++                   void *cpu_addr, dma_addr_t dma_addr, size_t size,
++                   unsigned long attrs)
++{
++#if defined(CONFIG_ARM) || defined(CONFIG_ARM64)
++      if (__generic_dma_ops(dev)->mmap)
++              return __generic_dma_ops(dev)->mmap(dev, vma, cpu_addr,
++                                                  dma_addr, size, attrs);
++#endif
++      return dma_common_mmap(dev, vma, cpu_addr, dma_addr, size);
++}
++EXPORT_SYMBOL_GPL(xen_swiotlb_dma_mmap);
+diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
+index 1782804f6c26..0fe346c4bd28 100644
+--- a/fs/btrfs/ioctl.c
++++ b/fs/btrfs/ioctl.c
+@@ -3052,7 +3052,7 @@ static int btrfs_cmp_data_prepare(struct inode *src, u64 loff,
+ out:
+       if (ret)
+               btrfs_cmp_data_free(cmp);
+-      return 0;
++      return ret;
+ }
+ 
+ static int btrfs_cmp_data(struct inode *src, u64 loff, struct inode *dst,
+@@ -4082,6 +4082,10 @@ static long btrfs_ioctl_default_subvol(struct file *file, void __user *argp)
+               ret = PTR_ERR(new_root);
+               goto out;
+       }
++      if (!is_fstree(new_root->objectid)) {
++              ret = -ENOENT;
++              goto out;
++      }
+ 
+       path = btrfs_alloc_path();
+       if (!path) {
+diff --git a/fs/btrfs/relocation.c b/fs/btrfs/relocation.c
+index 2cf5e142675e..04c61bcf62e5 100644
+--- a/fs/btrfs/relocation.c
++++ b/fs/btrfs/relocation.c
+@@ -2367,11 +2367,11 @@ void free_reloc_roots(struct list_head *list)
+       while (!list_empty(list)) {
+               reloc_root = list_entry(list->next, struct btrfs_root,
+                                       root_list);
++              __del_reloc_root(reloc_root);
+               free_extent_buffer(reloc_root->node);
+               free_extent_buffer(reloc_root->commit_root);
+               reloc_root->node = NULL;
+               reloc_root->commit_root = NULL;
+-              __del_reloc_root(reloc_root);
+       }
+ }
+ 
+diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c
+index c0c253005b76..87658f63b374 100644
+--- a/fs/cifs/cifsfs.c
++++ b/fs/cifs/cifsfs.c
+@@ -1360,7 +1360,7 @@ exit_cifs(void)
+       exit_cifs_idmap();
+ #endif
+ #ifdef CONFIG_CIFS_UPCALL
+-      unregister_key_type(&cifs_spnego_key_type);
++      exit_cifs_spnego();
+ #endif
+       cifs_destroy_request_bufs();
+       cifs_destroy_mids();
+diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c
+index 1a545695f547..f6712b6128d8 100644
+--- a/fs/cifs/connect.c
++++ b/fs/cifs/connect.c
+@@ -4071,6 +4071,14 @@ cifs_setup_session(const unsigned int xid, struct cifs_ses *ses,
+       cifs_dbg(FYI, "Security Mode: 0x%x Capabilities: 0x%x TimeAdjust: %d\n",
+                server->sec_mode, server->capabilities, server->timeAdj);
+ 
++      if (ses->auth_key.response) {
++              cifs_dbg(VFS, "Free previous auth_key.response = %p\n",
++                       ses->auth_key.response);
++              kfree(ses->auth_key.response);
++              ses->auth_key.response = NULL;
++              ses->auth_key.len = 0;
++      }
++
+       if (server->ops->sess_setup)
+               rc = server->ops->sess_setup(xid, ses, nls_info);
+ 
+diff --git a/fs/cifs/file.c b/fs/cifs/file.c
+index 3925758f6dde..cf192f9ce254 100644
+--- a/fs/cifs/file.c
++++ b/fs/cifs/file.c
+@@ -224,6 +224,13 @@ cifs_nt_open(char *full_path, struct inode *inode, struct cifs_sb_info *cifs_sb,
+       if (backup_cred(cifs_sb))
+               create_options |= CREATE_OPEN_BACKUP_INTENT;
+ 
++      /* O_SYNC also has bit for O_DSYNC so following check picks up either */
++      if (f_flags & O_SYNC)
++              create_options |= CREATE_WRITE_THROUGH;
++
++      if (f_flags & O_DIRECT)
++              create_options |= CREATE_NO_BUFFER;
++
+       oparms.tcon = tcon;
+       oparms.cifs_sb = cifs_sb;
+       oparms.desired_access = desired_access;
+diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c
+index 0437e5fdba56..69b610ad3fdc 100644
+--- a/fs/cifs/smb2pdu.c
++++ b/fs/cifs/smb2pdu.c
+@@ -366,7 +366,7 @@ assemble_neg_contexts(struct smb2_negotiate_req *req)
+       build_encrypt_ctxt((struct smb2_encryption_neg_context *)pneg_ctxt);
+       req->NegotiateContextOffset = cpu_to_le32(OFFSET_OF_NEG_CONTEXT);
+       req->NegotiateContextCount = cpu_to_le16(2);
+-      inc_rfc1001_len(req, 4 + sizeof(struct smb2_preauth_neg_context) + 2
++      inc_rfc1001_len(req, 4 + sizeof(struct smb2_preauth_neg_context)
+                       + sizeof(struct smb2_encryption_neg_context)); /* calculate hash */
+ }
+ #else
+@@ -531,15 +531,22 @@ int smb3_validate_negotiate(const unsigned int xid, struct cifs_tcon *tcon)
+ 
+       /*
+        * validation ioctl must be signed, so no point sending this if we
+-       * can not sign it.  We could eventually change this to selectively
++       * can not sign it (ie are not known user).  Even if signing is not
++       * required (enabled but not negotiated), in those cases we selectively
+        * sign just this, the first and only signed request on a connection.
+-       * This is good enough for now since a user who wants better security
+-       * would also enable signing on the mount. Having validation of
+-       * negotiate info for signed connections helps reduce attack vectors
++       * Having validation of negotiate info  helps reduce attack vectors.
+        */
+-      if (tcon->ses->server->sign == false)
++      if (tcon->ses->session_flags & SMB2_SESSION_FLAG_IS_GUEST)
+               return 0; /* validation requires signing */
+ 
++      if (tcon->ses->user_name == NULL) {
++              cifs_dbg(FYI, "Can't validate negotiate: null user mount\n");
++              return 0; /* validation requires signing */
++      }
++
++      if (tcon->ses->session_flags & SMB2_SESSION_FLAG_IS_NULL)
++              cifs_dbg(VFS, "Unexpected null user (anonymous) auth flag sent by server\n");
++
+       vneg_inbuf.Capabilities =
+                       cpu_to_le32(tcon->ses->server->vals->req_capabilities);
+       memcpy(vneg_inbuf.Guid, tcon->ses->server->client_guid,
+@@ -1010,6 +1017,8 @@ SMB2_sess_setup(const unsigned int xid, struct cifs_ses *ses,
+       while (sess_data->func)
+               sess_data->func(sess_data);
+ 
++      if ((ses->session_flags & SMB2_SESSION_FLAG_IS_GUEST) && (ses->sign))
++              cifs_dbg(VFS, "signing requested but authenticated as guest\n");
+       rc = sess_data->result;
+ out:
+       kfree(sess_data);
+diff --git a/fs/gfs2/glock.c b/fs/gfs2/glock.c
+index 7bff6f46f5da..f7cae1629c6c 100644
+--- a/fs/gfs2/glock.c
++++ b/fs/gfs2/glock.c
+@@ -1836,13 +1836,9 @@ static void *gfs2_glock_seq_start(struct seq_file *seq, loff_t *pos)
+ {
+       struct gfs2_glock_iter *gi = seq->private;
+       loff_t n = *pos;
+-      int ret;
+-
+-      if (gi->last_pos <= *pos)
+-              n = (*pos - gi->last_pos);
+ 
+-      ret = rhashtable_walk_start(&gi->hti);
+-      if (ret)
++      rhashtable_walk_enter(&gl_hash_table, &gi->hti);
++      if (rhashtable_walk_start(&gi->hti) != 0)
+               return NULL;
+ 
+       do {
+@@ -1850,6 +1846,7 @@ static void *gfs2_glock_seq_start(struct seq_file *seq, loff_t *pos)
+       } while (gi->gl && n--);
+ 
+       gi->last_pos = *pos;
++
+       return gi->gl;
+ }
+ 
+@@ -1861,6 +1858,7 @@ static void *gfs2_glock_seq_next(struct seq_file *seq, void *iter_ptr,
+       (*pos)++;
+       gi->last_pos = *pos;
+       gfs2_glock_iter_next(gi);
++
+       return gi->gl;
+ }
+ 
+@@ -1870,6 +1868,7 @@ static void gfs2_glock_seq_stop(struct seq_file *seq, void *iter_ptr)
+ 
+       gi->gl = NULL;
+       rhashtable_walk_stop(&gi->hti);
++      rhashtable_walk_exit(&gi->hti);
+ }
+ 
+ static int gfs2_glock_seq_show(struct seq_file *seq, void *iter_ptr)
+@@ -1932,12 +1931,10 @@ static int gfs2_glocks_open(struct inode *inode, struct file *file)
+               struct gfs2_glock_iter *gi = seq->private;
+ 
+               gi->sdp = inode->i_private;
+-              gi->last_pos = 0;
+              seq->buf = kmalloc(GFS2_SEQ_GOODSIZE, GFP_KERNEL | __GFP_NOWARN);
+               if (seq->buf)
+                       seq->size = GFS2_SEQ_GOODSIZE;
+               gi->gl = NULL;
+-              ret = rhashtable_walk_init(&gl_hash_table, &gi->hti, GFP_KERNEL);
+       }
+       return ret;
+ }
+@@ -1948,7 +1945,6 @@ static int gfs2_glocks_release(struct inode *inode, struct file *file)
+       struct gfs2_glock_iter *gi = seq->private;
+ 
+       gi->gl = NULL;
+-      rhashtable_walk_exit(&gi->hti);
+       return seq_release_private(inode, file);
+ }
+ 
+@@ -1960,12 +1956,10 @@ static int gfs2_glstats_open(struct inode *inode, struct file *file)
+               struct seq_file *seq = file->private_data;
+               struct gfs2_glock_iter *gi = seq->private;
+               gi->sdp = inode->i_private;
+-              gi->last_pos = 0;
+              seq->buf = kmalloc(GFS2_SEQ_GOODSIZE, GFP_KERNEL | __GFP_NOWARN);
+               if (seq->buf)
+                       seq->size = GFS2_SEQ_GOODSIZE;
+               gi->gl = NULL;
+-              ret = rhashtable_walk_init(&gl_hash_table, &gi->hti, GFP_KERNEL);
+       }
+       return ret;
+ }
+diff --git a/fs/proc/array.c b/fs/proc/array.c
+index 81818adb8e9e..c932ec454625 100644
+--- a/fs/proc/array.c
++++ b/fs/proc/array.c
+@@ -60,6 +60,7 @@
+ #include <linux/tty.h>
+ #include <linux/string.h>
+ #include <linux/mman.h>
++#include <linux/sched.h>
+ #include <linux/proc_fs.h>
+ #include <linux/ioport.h>
+ #include <linux/uaccess.h>
+@@ -416,7 +417,15 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
+                * esp and eip are intentionally zeroed out.  There is no
+                * non-racy way to read them without freezing the task.
+                * Programs that need reliable values can use ptrace(2).
++               *
++               * The only exception is if the task is core dumping because
++               * a program is not able to use ptrace(2) in that case. It is
++               * safe because the task has stopped executing permanently.
+                */
++              if (permitted && (task->flags & PF_DUMPCORE)) {
++                      eip = KSTK_EIP(task);
++                      esp = KSTK_ESP(task);
++              }
+       }
+ 
+       get_task_comm(tcomm, task);
+diff --git a/fs/read_write.c b/fs/read_write.c
+index e479e24dcd4c..09a8757efd34 100644
+--- a/fs/read_write.c
++++ b/fs/read_write.c
+@@ -114,7 +114,7 @@ generic_file_llseek_size(struct file *file, loff_t offset, int whence,
+                * In the generic case the entire file is data, so as long as
+                * offset isn't at the end of the file then the offset is data.
+                */
+-              if (offset >= eof)
++              if ((unsigned long long)offset >= eof)
+                       return -ENXIO;
+               break;
+       case SEEK_HOLE:
+@@ -122,7 +122,7 @@ generic_file_llseek_size(struct file *file, loff_t offset, int whence,
+                * There is a virtual hole at the end of the file, so as long as
+                * offset isn't i_size or larger, return i_size.
+                */
+-              if (offset >= eof)
++              if ((unsigned long long)offset >= eof)
+                       return -ENXIO;
+               offset = eof;
+               break;
+diff --git a/fs/xfs/xfs_ioctl.c b/fs/xfs/xfs_ioctl.c
+index bce2e260f55e..6c95812120eb 100644
+--- a/fs/xfs/xfs_ioctl.c
++++ b/fs/xfs/xfs_ioctl.c
+@@ -1085,6 +1085,7 @@ xfs_ioctl_setattr_dax_invalidate(
+       int                     *join_flags)
+ {
+       struct inode            *inode = VFS_I(ip);
++      struct super_block      *sb = inode->i_sb;
+       int                     error;
+ 
+       *join_flags = 0;
+@@ -1097,7 +1098,7 @@ xfs_ioctl_setattr_dax_invalidate(
+       if (fa->fsx_xflags & FS_XFLAG_DAX) {
+               if (!(S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode)))
+                       return -EINVAL;
+-              if (ip->i_mount->m_sb.sb_blocksize != PAGE_SIZE)
++              if (bdev_dax_supported(sb, sb->s_blocksize) < 0)
+                       return -EINVAL;
+       }
+ 
+diff --git a/include/linux/key.h b/include/linux/key.h
+index 722914798f37..6a544726903e 100644
+--- a/include/linux/key.h
++++ b/include/linux/key.h
+@@ -176,6 +176,7 @@ struct key {
+ #define KEY_FLAG_BUILTIN      8       /* set if key is built in to the kernel */
+ #define KEY_FLAG_ROOT_CAN_INVAL       9       /* set if key can be invalidated by root without permission */
+ #define KEY_FLAG_KEEP         10      /* set if key should not be removed */
++#define KEY_FLAG_UID_KEYRING  11      /* set if key is a user or user session keyring */
+ 
+       /* the key type and key description string
+        * - the desc is used to match a key against search criteria
+@@ -235,6 +236,7 @@ extern struct key *key_alloc(struct key_type *type,
+ #define KEY_ALLOC_NOT_IN_QUOTA                0x0002  /* not in quota */
+ #define KEY_ALLOC_BUILT_IN            0x0004  /* Key is built into kernel */
+ #define KEY_ALLOC_BYPASS_RESTRICTION  0x0008  /* Override the check on restricted keyrings */
++#define KEY_ALLOC_UID_KEYRING         0x0010  /* allocating a user or user session keyring */
+ 
+ extern void key_revoke(struct key *key);
+ extern void key_invalidate(struct key *key);
+diff --git a/include/net/mac80211.h b/include/net/mac80211.h
+index e2dba93e374f..2c7d876e2a1a 100644
+--- a/include/net/mac80211.h
++++ b/include/net/mac80211.h
+@@ -902,21 +902,10 @@ struct ieee80211_tx_info {
+                               unsigned long jiffies;
+                       };
+                       /* NB: vif can be NULL for injected frames */
+-                      union {
+-                              /* NB: vif can be NULL for injected frames */
+-                              struct ieee80211_vif *vif;
+-
+-                              /* When packets are enqueued on txq it's easy
+-                               * to re-construct the vif pointer. There's no
+-                               * more space in tx_info so it can be used to
+-                               * store the necessary enqueue time for packet
+-                               * sojourn time computation.
+-                               */
+-                              codel_time_t enqueue_time;
+-                      };
++                      struct ieee80211_vif *vif;
+                       struct ieee80211_key_conf *hw_key;
+                       u32 flags;
+-                      /* 4 bytes free */
++                      codel_time_t enqueue_time;
+               } control;
+               struct {
+                       u64 cookie;
+diff --git a/include/xen/swiotlb-xen.h b/include/xen/swiotlb-xen.h
+index 7c35e279d1e3..683057f79dca 100644
+--- a/include/xen/swiotlb-xen.h
++++ b/include/xen/swiotlb-xen.h
+@@ -58,4 +58,9 @@ xen_swiotlb_dma_supported(struct device *hwdev, u64 mask);
+ 
+ extern int
+ xen_swiotlb_set_dma_mask(struct device *dev, u64 dma_mask);
++
++extern int
++xen_swiotlb_dma_mmap(struct device *dev, struct vm_area_struct *vma,
++                   void *cpu_addr, dma_addr_t dma_addr, size_t size,
++                   unsigned long attrs);
+ #endif /* __LINUX_SWIOTLB_XEN_H */
+diff --git a/kernel/irq/irqdesc.c b/kernel/irq/irqdesc.c
+index 00bb0aeea1d0..77977f55dff7 100644
+--- a/kernel/irq/irqdesc.c
++++ b/kernel/irq/irqdesc.c
+@@ -405,10 +405,8 @@ static void free_desc(unsigned int irq)
+        * The sysfs entry must be serialized against a concurrent
+        * irq_sysfs_init() as well.
+        */
+-      mutex_lock(&sparse_irq_lock);
+       kobject_del(&desc->kobj);
+       delete_irq_desc(irq);
+-      mutex_unlock(&sparse_irq_lock);
+ 
+       /*
+        * We free the descriptor, masks and stat fields via RCU. That
+@@ -446,20 +444,15 @@ static int alloc_descs(unsigned int start, unsigned int cnt, int node,
+               desc = alloc_desc(start + i, node, flags, mask, owner);
+               if (!desc)
+                       goto err;
+-              mutex_lock(&sparse_irq_lock);
+               irq_insert_desc(start + i, desc);
+               irq_sysfs_add(start + i, desc);
+-              mutex_unlock(&sparse_irq_lock);
+       }
++      bitmap_set(allocated_irqs, start, cnt);
+       return start;
+ 
+ err:
+       for (i--; i >= 0; i--)
+               free_desc(start + i);
+-
+-      mutex_lock(&sparse_irq_lock);
+-      bitmap_clear(allocated_irqs, start, cnt);
+-      mutex_unlock(&sparse_irq_lock);
+       return -ENOMEM;
+ }
+ 
+@@ -558,6 +551,7 @@ static inline int alloc_descs(unsigned int start, unsigned int cnt, int node,
+ 
+               desc->owner = owner;
+       }
++      bitmap_set(allocated_irqs, start, cnt);
+       return start;
+ }
+ 
+@@ -653,10 +647,10 @@ void irq_free_descs(unsigned int from, unsigned int cnt)
+       if (from >= nr_irqs || (from + cnt) > nr_irqs)
+               return;
+ 
++      mutex_lock(&sparse_irq_lock);
+       for (i = 0; i < cnt; i++)
+               free_desc(from + i);
+ 
+-      mutex_lock(&sparse_irq_lock);
+       bitmap_clear(allocated_irqs, from, cnt);
+       mutex_unlock(&sparse_irq_lock);
+ }
+@@ -703,19 +697,15 @@ __irq_alloc_descs(int irq, unsigned int from, unsigned int cnt, int node,
+                                          from, cnt, 0);
+       ret = -EEXIST;
+       if (irq >=0 && start != irq)
+-              goto err;
++              goto unlock;
+ 
+       if (start + cnt > nr_irqs) {
+               ret = irq_expand_nr_irqs(start + cnt);
+               if (ret)
+-                      goto err;
++                      goto unlock;
+       }
+-
+-      bitmap_set(allocated_irqs, start, cnt);
+-      mutex_unlock(&sparse_irq_lock);
+-      return alloc_descs(start, cnt, node, affinity, owner);
+-
+-err:
++      ret = alloc_descs(start, cnt, node, affinity, owner);
++unlock:
+       mutex_unlock(&sparse_irq_lock);
+       return ret;
+ }
+diff --git a/kernel/seccomp.c b/kernel/seccomp.c
+index 0db7c8a2afe2..af182a6df25b 100644
+--- a/kernel/seccomp.c
++++ b/kernel/seccomp.c
+@@ -457,14 +457,19 @@ static long seccomp_attach_filter(unsigned int flags,
+       return 0;
+ }
+ 
++void __get_seccomp_filter(struct seccomp_filter *filter)
++{
++      /* Reference count is bounded by the number of total processes. */
++      atomic_inc(&filter->usage);
++}
++
+ /* get_seccomp_filter - increments the reference count of the filter on @tsk */
+ void get_seccomp_filter(struct task_struct *tsk)
+ {
+       struct seccomp_filter *orig = tsk->seccomp.filter;
+       if (!orig)
+               return;
+-      /* Reference count is bounded by the number of total processes. */
+-      atomic_inc(&orig->usage);
++      __get_seccomp_filter(orig);
+ }
+ 
+ static inline void seccomp_filter_free(struct seccomp_filter *filter)
+@@ -475,10 +480,8 @@ static inline void seccomp_filter_free(struct seccomp_filter *filter)
+       }
+ }
+ 
+-/* put_seccomp_filter - decrements the ref count of tsk->seccomp.filter */
+-void put_seccomp_filter(struct task_struct *tsk)
++static void __put_seccomp_filter(struct seccomp_filter *orig)
+ {
+-      struct seccomp_filter *orig = tsk->seccomp.filter;
+       /* Clean up single-reference branches iteratively. */
+       while (orig && atomic_dec_and_test(&orig->usage)) {
+               struct seccomp_filter *freeme = orig;
+@@ -487,6 +490,12 @@ void put_seccomp_filter(struct task_struct *tsk)
+       }
+ }
+ 
++/* put_seccomp_filter - decrements the ref count of tsk->seccomp.filter */
++void put_seccomp_filter(struct task_struct *tsk)
++{
++      __put_seccomp_filter(tsk->seccomp.filter);
++}
++
+ /**
+  * seccomp_send_sigsys - signals the task to allow in-process syscall emulation
+  * @syscall: syscall number to send to userland
+@@ -892,13 +901,13 @@ long seccomp_get_filter(struct task_struct *task, unsigned long filter_off,
+       if (!data)
+               goto out;
+ 
+-      get_seccomp_filter(task);
++      __get_seccomp_filter(filter);
+       spin_unlock_irq(&task->sighand->siglock);
+ 
+       if (copy_to_user(data, fprog->filter, bpf_classic_proglen(fprog)))
+               ret = -EFAULT;
+ 
+-      put_seccomp_filter(task);
++      __put_seccomp_filter(filter);
+       return ret;
+ 
+ out:
+diff --git a/kernel/sysctl.c b/kernel/sysctl.c
+index 265e0d0216e3..24d603d29512 100644
+--- a/kernel/sysctl.c
++++ b/kernel/sysctl.c
+@@ -1189,6 +1189,8 @@ static struct ctl_table kern_table[] = {
+               .maxlen         = sizeof(unsigned int),
+               .mode           = 0644,
+               .proc_handler   = timer_migration_handler,
++              .extra1         = &zero,
++              .extra2         = &one,
+       },
+ #endif
+ #ifdef CONFIG_BPF_SYSCALL
+diff --git a/kernel/time/timer.c b/kernel/time/timer.c
+index df445cde8a1e..7d670362891a 100644
+--- a/kernel/time/timer.c
++++ b/kernel/time/timer.c
+@@ -240,7 +240,7 @@ int timer_migration_handler(struct ctl_table *table, int write,
+       int ret;
+ 
+       mutex_lock(&mutex);
+-      ret = proc_dointvec(table, write, buffer, lenp, ppos);
++      ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
+       if (!ret && write)
+               timers_update_migration(false);
+       mutex_unlock(&mutex);
+diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
+index f95bf81529f5..c1e50cc0d7b0 100644
+--- a/kernel/trace/trace.c
++++ b/kernel/trace/trace.c
+@@ -3569,11 +3569,17 @@ static int tracing_open(struct inode *inode, struct file *file)
+       /* If this file was open for write, then erase contents */
+       if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC)) {
+               int cpu = tracing_get_cpu(inode);
++              struct trace_buffer *trace_buf = &tr->trace_buffer;
++
++#ifdef CONFIG_TRACER_MAX_TRACE
++              if (tr->current_trace->print_max)
++                      trace_buf = &tr->max_buffer;
++#endif
+ 
+               if (cpu == RING_BUFFER_ALL_CPUS)
+-                      tracing_reset_online_cpus(&tr->trace_buffer);
++                      tracing_reset_online_cpus(trace_buf);
+               else
+-                      tracing_reset(&tr->trace_buffer, cpu);
++                      tracing_reset(trace_buf, cpu);
+       }
+ 
+       if (file->f_mode & FMODE_READ) {
+@@ -5128,7 +5134,7 @@ static int tracing_wait_pipe(struct file *filp)
+                *
+                * iter->pos will be 0 if we haven't read anything.
+                */
+-              if (!tracing_is_on() && iter->pos)
++              if (!tracer_tracing_is_on(iter->tr) && iter->pos)
+                       break;
+ 
+               mutex_unlock(&iter->mutex);
+diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c
+index 37bec0f864b7..a7aa54f45e19 100644
+--- a/net/mac80211/iface.c
++++ b/net/mac80211/iface.c
+@@ -791,6 +791,7 @@ static int ieee80211_open(struct net_device *dev)
+ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
+                             bool going_down)
+ {
++      struct ieee80211_sub_if_data *txq_sdata = sdata;
+       struct ieee80211_local *local = sdata->local;
+       struct fq *fq = &local->fq;
+       unsigned long flags;
+@@ -931,6 +932,9 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
+ 
+       switch (sdata->vif.type) {
+       case NL80211_IFTYPE_AP_VLAN:
++              txq_sdata = container_of(sdata->bss,
++                                       struct ieee80211_sub_if_data, u.ap);
++
+               mutex_lock(&local->mtx);
+               list_del(&sdata->u.vlan.list);
+               mutex_unlock(&local->mtx);
+@@ -1001,8 +1005,17 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
+       }
+       spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags);
+ 
+-      if (sdata->vif.txq) {
+-              struct txq_info *txqi = to_txq_info(sdata->vif.txq);
++      if (txq_sdata->vif.txq) {
++              struct txq_info *txqi = to_txq_info(txq_sdata->vif.txq);
++
++              /*
++               * FIXME FIXME
++               *
++               * We really shouldn't purge the *entire* txqi since that
++               * contains frames for the other AP_VLANs (and possibly
++               * the AP itself) as well, but there's no API in FQ now
++               * to be able to filter.
++               */
+ 
+               spin_lock_bh(&fq->lock);
+               ieee80211_txq_purge(local, txqi);
+diff --git a/net/mac80211/offchannel.c b/net/mac80211/offchannel.c
+index eede5c6db8d5..30bba53c2992 100644
+--- a/net/mac80211/offchannel.c
++++ b/net/mac80211/offchannel.c
+@@ -707,6 +707,8 @@ static int ieee80211_cancel_roc(struct ieee80211_local *local,
+       if (!cookie)
+               return -ENOENT;
+ 
++      flush_work(&local->hw_roc_start);
++
+       mutex_lock(&local->mtx);
+       list_for_each_entry_safe(roc, tmp, &local->roc_list, list) {
+               if (!mgmt_tx && roc->cookie != cookie)
+diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c
+index dd190ff3daea..274c564bd9af 100644
+--- a/net/mac80211/tx.c
++++ b/net/mac80211/tx.c
+@@ -1277,11 +1277,6 @@ static void ieee80211_set_skb_enqueue_time(struct sk_buff *skb)
+       IEEE80211_SKB_CB(skb)->control.enqueue_time = codel_get_time();
+ }
+ 
+-static void ieee80211_set_skb_vif(struct sk_buff *skb, struct txq_info *txqi)
+-{
+-      IEEE80211_SKB_CB(skb)->control.vif = txqi->txq.vif;
+-}
+-
+ static u32 codel_skb_len_func(const struct sk_buff *skb)
+ {
+       return skb->len;
+@@ -3388,6 +3383,7 @@ struct sk_buff *ieee80211_tx_dequeue(struct ieee80211_hw *hw,
+       struct ieee80211_tx_info *info;
+       struct ieee80211_tx_data tx;
+       ieee80211_tx_result r;
++      struct ieee80211_vif *vif;
+ 
+       spin_lock_bh(&fq->lock);
+ 
+@@ -3404,8 +3400,6 @@ struct sk_buff *ieee80211_tx_dequeue(struct ieee80211_hw *hw,
+       if (!skb)
+               goto out;
+ 
+-      ieee80211_set_skb_vif(skb, txqi);
+-
+       hdr = (struct ieee80211_hdr *)skb->data;
+       info = IEEE80211_SKB_CB(skb);
+ 
+@@ -3462,6 +3456,34 @@ struct sk_buff *ieee80211_tx_dequeue(struct ieee80211_hw *hw,
+               }
+       }
+ 
++      switch (tx.sdata->vif.type) {
++      case NL80211_IFTYPE_MONITOR:
++              if (tx.sdata->u.mntr.flags & MONITOR_FLAG_ACTIVE) {
++                      vif = &tx.sdata->vif;
++                      break;
++              }
++              tx.sdata = rcu_dereference(local->monitor_sdata);
++              if (tx.sdata) {
++                      vif = &tx.sdata->vif;
++                      info->hw_queue =
++                              vif->hw_queue[skb_get_queue_mapping(skb)];
++              } else if (ieee80211_hw_check(&local->hw, QUEUE_CONTROL)) {
++                      ieee80211_free_txskb(&local->hw, skb);
++                      goto begin;
++              } else {
++                      vif = NULL;
++              }
++              break;
++      case NL80211_IFTYPE_AP_VLAN:
++              tx.sdata = container_of(tx.sdata->bss,
++                                      struct ieee80211_sub_if_data, u.ap);
++              /* fall through */
++      default:
++              vif = &tx.sdata->vif;
++              break;
++      }
++
++      IEEE80211_SKB_CB(skb)->control.vif = vif;
+ out:
+       spin_unlock_bh(&fq->lock);
+ 
+diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
+index e9e9bc5c8773..ece0fbc08607 100644
+--- a/net/wireless/nl80211.c
++++ b/net/wireless/nl80211.c
+@@ -10385,6 +10385,9 @@ static int nl80211_set_rekey_data(struct sk_buff *skb, struct genl_info *info)
+       if (err)
+               return err;
+ 
++      if (!tb[NL80211_REKEY_DATA_REPLAY_CTR] || !tb[NL80211_REKEY_DATA_KEK] ||
++          !tb[NL80211_REKEY_DATA_KCK])
++              return -EINVAL;
+       if (nla_len(tb[NL80211_REKEY_DATA_REPLAY_CTR]) != NL80211_REPLAY_CTR_LEN)
+               return -ERANGE;
+       if (nla_len(tb[NL80211_REKEY_DATA_KEK]) != NL80211_KEK_LEN)
+diff --git a/security/keys/Kconfig b/security/keys/Kconfig
+index d942c7c2bc0a..e0a39781b10f 100644
+--- a/security/keys/Kconfig
++++ b/security/keys/Kconfig
+@@ -41,10 +41,8 @@ config BIG_KEYS
+       bool "Large payload keys"
+       depends on KEYS
+       depends on TMPFS
+-      depends on (CRYPTO_ANSI_CPRNG = y || CRYPTO_DRBG = y)
+       select CRYPTO_AES
+-      select CRYPTO_ECB
+-      select CRYPTO_RNG
++      select CRYPTO_GCM
+       help
+         This option provides support for holding large keys within the kernel
+         (for example Kerberos ticket caches).  The data may be stored out to
+diff --git a/security/keys/big_key.c b/security/keys/big_key.c
+index 835c1ab30d01..47c6dcab1a8e 100644
+--- a/security/keys/big_key.c
++++ b/security/keys/big_key.c
+@@ -1,5 +1,6 @@
+ /* Large capacity key type
+  *
++ * Copyright (C) 2017 Jason A. Donenfeld <[email protected]>. All Rights Reserved.
+  * Copyright (C) 2013 Red Hat, Inc. All Rights Reserved.
+  * Written by David Howells ([email protected])
+  *
+@@ -16,10 +17,10 @@
+ #include <linux/shmem_fs.h>
+ #include <linux/err.h>
+ #include <linux/scatterlist.h>
++#include <linux/random.h>
+ #include <keys/user-type.h>
+ #include <keys/big_key-type.h>
+-#include <crypto/rng.h>
+-#include <crypto/skcipher.h>
++#include <crypto/aead.h>
+ 
+ /*
+  * Layout of key payload words.
+@@ -49,7 +50,12 @@ enum big_key_op {
+ /*
+  * Key size for big_key data encryption
+  */
+-#define ENC_KEY_SIZE  16
++#define ENC_KEY_SIZE 32
++
++/*
++ * Authentication tag length
++ */
++#define ENC_AUTHTAG_SIZE 16
+ 
+ /*
+  * big_key defined keys take an arbitrary string as the description and an
+@@ -64,57 +70,62 @@ struct key_type key_type_big_key = {
+       .destroy                = big_key_destroy,
+       .describe               = big_key_describe,
+       .read                   = big_key_read,
++      /* no ->update(); don't add it without changing big_key_crypt() nonce */
+ };
+ 
+ /*
+- * Crypto names for big_key data encryption
++ * Crypto names for big_key data authenticated encryption
+  */
+-static const char big_key_rng_name[] = "stdrng";
+-static const char big_key_alg_name[] = "ecb(aes)";
++static const char big_key_alg_name[] = "gcm(aes)";
+ 
+ /*
+- * Crypto algorithms for big_key data encryption
++ * Crypto algorithms for big_key data authenticated encryption
+  */
+-static struct crypto_rng *big_key_rng;
+-static struct crypto_skcipher *big_key_skcipher;
++static struct crypto_aead *big_key_aead;
+ 
+ /*
+- * Generate random key to encrypt big_key data
++ * Since changing the key affects the entire object, we need a mutex.
+  */
+-static inline int big_key_gen_enckey(u8 *key)
+-{
+-      return crypto_rng_get_bytes(big_key_rng, key, ENC_KEY_SIZE);
+-}
++static DEFINE_MUTEX(big_key_aead_lock);
+ 
+ /*
+  * Encrypt/decrypt big_key data
+  */
+ static int big_key_crypt(enum big_key_op op, u8 *data, size_t datalen, u8 *key)
+ {
+-      int ret = -EINVAL;
++      int ret;
+       struct scatterlist sgio;
+-      SKCIPHER_REQUEST_ON_STACK(req, big_key_skcipher);
+-
+-      if (crypto_skcipher_setkey(big_key_skcipher, key, ENC_KEY_SIZE)) {
++      struct aead_request *aead_req;
++      /* We always use a zero nonce. The reason we can get away with this is
++       * because we're using a different randomly generated key for every
++       * different encryption. Notably, too, key_type_big_key doesn't define
++       * an .update function, so there's no chance we'll wind up reusing the
++       * key to encrypt updated data. Simply put: one key, one encryption.
++       */
++      u8 zero_nonce[crypto_aead_ivsize(big_key_aead)];
++
++      aead_req = aead_request_alloc(big_key_aead, GFP_KERNEL);
++      if (!aead_req)
++              return -ENOMEM;
++
++      memset(zero_nonce, 0, sizeof(zero_nonce));
++      sg_init_one(&sgio, data, datalen + (op == BIG_KEY_ENC ? ENC_AUTHTAG_SIZE : 0));
++      aead_request_set_crypt(aead_req, &sgio, &sgio, datalen, zero_nonce);
++      aead_request_set_callback(aead_req, CRYPTO_TFM_REQ_MAY_SLEEP, NULL, NULL);
++      aead_request_set_ad(aead_req, 0);
++
++      mutex_lock(&big_key_aead_lock);
++      if (crypto_aead_setkey(big_key_aead, key, ENC_KEY_SIZE)) {
+               ret = -EAGAIN;
+               goto error;
+       }
+-
+-      skcipher_request_set_tfm(req, big_key_skcipher);
+-      skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP,
+-                                    NULL, NULL);
+-
+-      sg_init_one(&sgio, data, datalen);
+-      skcipher_request_set_crypt(req, &sgio, &sgio, datalen, NULL);
+-
+       if (op == BIG_KEY_ENC)
+-              ret = crypto_skcipher_encrypt(req);
++              ret = crypto_aead_encrypt(aead_req);
+       else
+-              ret = crypto_skcipher_decrypt(req);
+-
+-      skcipher_request_zero(req);
+-
++              ret = crypto_aead_decrypt(aead_req);
+ error:
++      mutex_unlock(&big_key_aead_lock);
++      aead_request_free(aead_req);
+       return ret;
+ }
+ 
+@@ -146,15 +157,13 @@ int big_key_preparse(struct key_preparsed_payload *prep)
+                *
+                * File content is stored encrypted with randomly generated key.
+                */
+-              size_t enclen = ALIGN(datalen, crypto_skcipher_blocksize(big_key_skcipher));
++              size_t enclen = datalen + ENC_AUTHTAG_SIZE;
+ 
+-              /* prepare aligned data to encrypt */
+               data = kmalloc(enclen, GFP_KERNEL);
+               if (!data)
+                       return -ENOMEM;
+ 
+               memcpy(data, prep->data, datalen);
+-              memset(data + datalen, 0x00, enclen - datalen);
+ 
+               /* generate random key */
+               enckey = kmalloc(ENC_KEY_SIZE, GFP_KERNEL);
+@@ -162,13 +171,10 @@ int big_key_preparse(struct key_preparsed_payload *prep)
+                       ret = -ENOMEM;
+                       goto error;
+               }
+-
+-              ret = big_key_gen_enckey(enckey);
+-              if (ret)
+-                      goto err_enckey;
++              get_random_bytes(enckey, ENC_KEY_SIZE);
+ 
+               /* encrypt aligned data */
+-              ret = big_key_crypt(BIG_KEY_ENC, data, enclen, enckey);
++              ret = big_key_crypt(BIG_KEY_ENC, data, datalen, enckey);
+               if (ret)
+                       goto err_enckey;
+ 
+@@ -194,7 +200,7 @@ int big_key_preparse(struct key_preparsed_payload *prep)
+               *path = file->f_path;
+               path_get(path);
+               fput(file);
+-              kfree(data);
++              kzfree(data);
+       } else {
+               /* Just store the data in a buffer */
+               void *data = kmalloc(datalen, GFP_KERNEL);
+@@ -210,9 +216,9 @@ int big_key_preparse(struct key_preparsed_payload *prep)
+ err_fput:
+       fput(file);
+ err_enckey:
+-      kfree(enckey);
++      kzfree(enckey);
+ error:
+-      kfree(data);
++      kzfree(data);
+       return ret;
+ }
+ 
+@@ -226,7 +232,7 @@ void big_key_free_preparse(struct key_preparsed_payload *prep)
+ 
+               path_put(path);
+       }
+-      kfree(prep->payload.data[big_key_data]);
++      kzfree(prep->payload.data[big_key_data]);
+ }
+ 
+ /*
+@@ -258,7 +264,7 @@ void big_key_destroy(struct key *key)
+               path->mnt = NULL;
+               path->dentry = NULL;
+       }
+-      kfree(key->payload.data[big_key_data]);
++      kzfree(key->payload.data[big_key_data]);
+       key->payload.data[big_key_data] = NULL;
+ }
+ 
+@@ -294,7 +300,7 @@ long big_key_read(const struct key *key, char __user *buffer, size_t buflen)
+               struct file *file;
+               u8 *data;
+               u8 *enckey = (u8 *)key->payload.data[big_key_data];
+-              size_t enclen = ALIGN(datalen, crypto_skcipher_blocksize(big_key_skcipher));
++              size_t enclen = datalen + ENC_AUTHTAG_SIZE;
+ 
+               data = kmalloc(enclen, GFP_KERNEL);
+               if (!data)
+@@ -326,7 +332,7 @@ long big_key_read(const struct key *key, char __user *buffer, size_t buflen)
+ err_fput:
+               fput(file);
+ error:
+-              kfree(data);
++              kzfree(data);
+       } else {
+               ret = datalen;
+               if (copy_to_user(buffer, key->payload.data[big_key_data],
+@@ -342,47 +348,31 @@ long big_key_read(const struct key *key, char __user *buffer, size_t buflen)
+  */
+ static int __init big_key_init(void)
+ {
+-      struct crypto_skcipher *cipher;
+-      struct crypto_rng *rng;
+       int ret;
+ 
+-      rng = crypto_alloc_rng(big_key_rng_name, 0, 0);
+-      if (IS_ERR(rng)) {
+-              pr_err("Can't alloc rng: %ld\n", PTR_ERR(rng));
+-              return PTR_ERR(rng);
+-      }
+-
+-      big_key_rng = rng;
+-
+-      /* seed RNG */
+-      ret = crypto_rng_reset(rng, NULL, crypto_rng_seedsize(rng));
+-      if (ret) {
+-              pr_err("Can't reset rng: %d\n", ret);
+-              goto error_rng;
+-      }
+-
+       /* init block cipher */
+-      cipher = crypto_alloc_skcipher(big_key_alg_name, 0, CRYPTO_ALG_ASYNC);
+-      if (IS_ERR(cipher)) {
+-              ret = PTR_ERR(cipher);
++      big_key_aead = crypto_alloc_aead(big_key_alg_name, 0, CRYPTO_ALG_ASYNC);
++      if (IS_ERR(big_key_aead)) {
++              ret = PTR_ERR(big_key_aead);
+               pr_err("Can't alloc crypto: %d\n", ret);
+-              goto error_rng;
++              return ret;
++      }
++      ret = crypto_aead_setauthsize(big_key_aead, ENC_AUTHTAG_SIZE);
++      if (ret < 0) {
++              pr_err("Can't set crypto auth tag len: %d\n", ret);
++              goto free_aead;
+       }
+-
+-      big_key_skcipher = cipher;
+ 
+       ret = register_key_type(&key_type_big_key);
+       if (ret < 0) {
+               pr_err("Can't register type: %d\n", ret);
+-              goto error_cipher;
++              goto free_aead;
+       }
+ 
+       return 0;
+ 
+-error_cipher:
+-      crypto_free_skcipher(big_key_skcipher);
+-error_rng:
+-      crypto_free_rng(big_key_rng);
++free_aead:
++      crypto_free_aead(big_key_aead);
+       return ret;
+ }
+ 
+diff --git a/security/keys/internal.h b/security/keys/internal.h
+index a705a7d92ad7..fb0c65049c19 100644
+--- a/security/keys/internal.h
++++ b/security/keys/internal.h
+@@ -137,7 +137,7 @@ extern key_ref_t keyring_search_aux(key_ref_t keyring_ref,
+ extern key_ref_t search_my_process_keyrings(struct keyring_search_context *ctx);
+ extern key_ref_t search_process_keyrings(struct keyring_search_context *ctx);
+ 
+-extern struct key *find_keyring_by_name(const char *name, bool skip_perm_check);
++extern struct key *find_keyring_by_name(const char *name, bool uid_keyring);
+ 
+ extern int install_user_keyrings(void);
+ extern int install_thread_keyring_to_cred(struct cred *);
+diff --git a/security/keys/key.c b/security/keys/key.c
+index 2f4ce35ae2aa..135e1eb7e468 100644
+--- a/security/keys/key.c
++++ b/security/keys/key.c
+@@ -301,6 +301,8 @@ struct key *key_alloc(struct key_type *type, const char *desc,
+               key->flags |= 1 << KEY_FLAG_IN_QUOTA;
+       if (flags & KEY_ALLOC_BUILT_IN)
+               key->flags |= 1 << KEY_FLAG_BUILTIN;
++      if (flags & KEY_ALLOC_UID_KEYRING)
++              key->flags |= 1 << KEY_FLAG_UID_KEYRING;
+ 
+ #ifdef KEY_DEBUGGING
+       key->magic = KEY_DEBUG_MAGIC;
+diff --git a/security/keys/keyctl.c b/security/keys/keyctl.c
+index ada12c3e3ac4..1302cb398346 100644
+--- a/security/keys/keyctl.c
++++ b/security/keys/keyctl.c
+@@ -766,6 +766,11 @@ long keyctl_read_key(key_serial_t keyid, char __user *buffer, size_t buflen)
+ 
+       key = key_ref_to_ptr(key_ref);
+ 
++      if (test_bit(KEY_FLAG_NEGATIVE, &key->flags)) {
++              ret = -ENOKEY;
++              goto error2;
++      }
++
+       /* see if we can read it directly */
+       ret = key_permission(key_ref, KEY_NEED_READ);
+       if (ret == 0)
+diff --git a/security/keys/keyring.c b/security/keys/keyring.c
+index c91e4e0cea08..a86d0ae1773c 100644
+--- a/security/keys/keyring.c
++++ b/security/keys/keyring.c
+@@ -416,7 +416,7 @@ static void keyring_describe(const struct key *keyring, struct seq_file *m)
+ }
+ 
+ struct keyring_read_iterator_context {
+-      size_t                  qty;
++      size_t                  buflen;
+       size_t                  count;
+       key_serial_t __user     *buffer;
+ };
+@@ -428,9 +428,9 @@ static int keyring_read_iterator(const void *object, void *data)
+       int ret;
+ 
+       kenter("{%s,%d},,{%zu/%zu}",
+-             key->type->name, key->serial, ctx->count, ctx->qty);
++             key->type->name, key->serial, ctx->count, ctx->buflen);
+ 
+-      if (ctx->count >= ctx->qty)
++      if (ctx->count >= ctx->buflen)
+               return 1;
+ 
+       ret = put_user(key->serial, ctx->buffer);
+@@ -465,16 +465,12 @@ static long keyring_read(const struct key *keyring,
+               return 0;
+ 
+       /* Calculate how much data we could return */
+-      ctx.qty = nr_keys * sizeof(key_serial_t);
+-
+       if (!buffer || !buflen)
+-              return ctx.qty;
+-
+-      if (buflen > ctx.qty)
+-              ctx.qty = buflen;
++              return nr_keys * sizeof(key_serial_t);
+ 
+       /* Copy the IDs of the subscribed keys into the buffer */
+       ctx.buffer = (key_serial_t __user *)buffer;
++      ctx.buflen = buflen;
+       ctx.count = 0;
+       ret = assoc_array_iterate(&keyring->keys, keyring_read_iterator, &ctx);
+       if (ret < 0) {
+@@ -989,15 +985,15 @@ key_ref_t find_key_to_update(key_ref_t keyring_ref,
+ /*
+  * Find a keyring with the specified name.
+  *
+- * All named keyrings in the current user namespace are searched, provided they
+- * grant Search permission directly to the caller (unless this check is
+- * skipped).  Keyrings whose usage points have reached zero or who have been
+- * revoked are skipped.
++ * Only keyrings that have nonzero refcount, are not revoked, and are owned by a
++ * user in the current user namespace are considered.  If @uid_keyring is %true,
++ * the keyring additionally must have been allocated as a user or user session
++ * keyring; otherwise, it must grant Search permission directly to the caller.
+  *
+  * Returns a pointer to the keyring with the keyring's refcount having being
+  * incremented on success.  -ENOKEY is returned if a key could not be found.
+  */
+-struct key *find_keyring_by_name(const char *name, bool skip_perm_check)
++struct key *find_keyring_by_name(const char *name, bool uid_keyring)
+ {
+       struct key *keyring;
+       int bucket;
+@@ -1025,10 +1021,15 @@ struct key *find_keyring_by_name(const char *name, bool skip_perm_check)
+                       if (strcmp(keyring->description, name) != 0)
+                               continue;
+ 
+-                      if (!skip_perm_check &&
+-                          key_permission(make_key_ref(keyring, 0),
+-                                         KEY_NEED_SEARCH) < 0)
+-                              continue;
++                      if (uid_keyring) {
++                              if (!test_bit(KEY_FLAG_UID_KEYRING,
++                                            &keyring->flags))
++                                      continue;
++                      } else {
++                              if (key_permission(make_key_ref(keyring, 0),
++                                                 KEY_NEED_SEARCH) < 0)
++                                      continue;
++                      }
+ 
+                       /* we've got a match but we might end up racing with
+                        * key_cleanup() if the keyring is currently 'dead'
+diff --git a/security/keys/process_keys.c b/security/keys/process_keys.c
+index 45536c677b05..ce45c78cf0a2 100644
+--- a/security/keys/process_keys.c
++++ b/security/keys/process_keys.c
+@@ -76,7 +76,8 @@ int install_user_keyrings(void)
+               if (IS_ERR(uid_keyring)) {
+                       uid_keyring = keyring_alloc(buf, user->uid, INVALID_GID,
+                                                   cred, user_keyring_perm,
+-                                                  KEY_ALLOC_IN_QUOTA,
++                                                  KEY_ALLOC_UID_KEYRING |
++                                                      KEY_ALLOC_IN_QUOTA,
+                                                   NULL, NULL);
+                       if (IS_ERR(uid_keyring)) {
+                               ret = PTR_ERR(uid_keyring);
+@@ -93,7 +94,8 @@ int install_user_keyrings(void)
+                       session_keyring =
+                               keyring_alloc(buf, user->uid, INVALID_GID,
+                                             cred, user_keyring_perm,
+-                                            KEY_ALLOC_IN_QUOTA,
++                                            KEY_ALLOC_UID_KEYRING |
++                                                KEY_ALLOC_IN_QUOTA,
+                                             NULL, NULL);
+                       if (IS_ERR(session_keyring)) {
+                               ret = PTR_ERR(session_keyring);
+diff --git a/tools/testing/selftests/seccomp/seccomp_bpf.c b/tools/testing/selftests/seccomp/seccomp_bpf.c
+index 03f1fa495d74..cbb0564c0ec4 100644
+--- a/tools/testing/selftests/seccomp/seccomp_bpf.c
++++ b/tools/testing/selftests/seccomp/seccomp_bpf.c
+@@ -6,10 +6,18 @@
+  */
+ 
+ #include <sys/types.h>
+-#include <asm/siginfo.h>
+-#define __have_siginfo_t 1
+-#define __have_sigval_t 1
+-#define __have_sigevent_t 1
++
++/*
++ * glibc 2.26 and later have SIGSYS in siginfo_t. Before that,
++ * we need to use the kernel's siginfo.h file and trick glibc
++ * into accepting it.
++ */
++#if !__GLIBC_PREREQ(2, 26)
++# include <asm/siginfo.h>
++# define __have_siginfo_t 1
++# define __have_sigval_t 1
++# define __have_sigevent_t 1
++#endif
+ 
+ #include <errno.h>
+ #include <linux/filter.h>
+@@ -676,7 +684,7 @@ TEST_F_SIGNAL(TRAP, ign, SIGSYS)
+       syscall(__NR_getpid);
+ }
+ 
+-static struct siginfo TRAP_info;
++static siginfo_t TRAP_info;
+ static volatile int TRAP_nr;
+ static void TRAP_action(int nr, siginfo_t *info, void *void_context)
+ {
