commit:     d08a1764ec3b4b4d175474453e1aaf3c26e65f63
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Sat Dec  9 23:29:22 2017 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Sat Dec  9 23:29:22 2017 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=d08a1764

Linux patch 4.9.68

 0000_README             |    4 +
 1067_linux-4.9.68.patch | 3370 +++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 3374 insertions(+)

diff --git a/0000_README b/0000_README
index baf7aeb..7f2750d 100644
--- a/0000_README
+++ b/0000_README
@@ -311,6 +311,10 @@ Patch:  1066_linux-4.9.67.patch
 From:   http://www.kernel.org
 Desc:   Linux 4.9.67
 
+Patch:  1067_linux-4.9.68.patch
+From:   http://www.kernel.org
+Desc:   Linux 4.9.68
+
 Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.

diff --git a/1067_linux-4.9.68.patch b/1067_linux-4.9.68.patch
new file mode 100644
index 0000000..d132406
--- /dev/null
+++ b/1067_linux-4.9.68.patch
@@ -0,0 +1,3370 @@
+diff --git a/Makefile b/Makefile
+index 70546af61a0a..dfe17af517b2 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,6 +1,6 @@
+ VERSION = 4
+ PATCHLEVEL = 9
+-SUBLEVEL = 67
++SUBLEVEL = 68
+ EXTRAVERSION =
+ NAME = Roaring Lionus
+ 
+diff --git a/arch/arm/mach-omap1/dma.c b/arch/arm/mach-omap1/dma.c
+index f6ba589cd312..c821c1d5610e 100644
+--- a/arch/arm/mach-omap1/dma.c
++++ b/arch/arm/mach-omap1/dma.c
+@@ -32,7 +32,6 @@
+ #include "soc.h"
+ 
+ #define OMAP1_DMA_BASE                        (0xfffed800)
+-#define OMAP1_LOGICAL_DMA_CH_COUNT    17
+ 
+ static u32 enable_1510_mode;
+ 
+@@ -348,8 +347,6 @@ static int __init omap1_system_dma_init(void)
+               goto exit_iounmap;
+       }
+ 
+-      d->lch_count            = OMAP1_LOGICAL_DMA_CH_COUNT;
+-
+       /* Valid attributes for omap1 plus processors */
+       if (cpu_is_omap15xx())
+               d->dev_caps = ENABLE_1510_MODE;
+@@ -366,13 +363,14 @@ static int __init omap1_system_dma_init(void)
+       d->dev_caps             |= CLEAR_CSR_ON_READ;
+       d->dev_caps             |= IS_WORD_16;
+ 
+-      if (cpu_is_omap15xx())
+-              d->chan_count = 9;
+-      else if (cpu_is_omap16xx() || cpu_is_omap7xx()) {
+-              if (!(d->dev_caps & ENABLE_1510_MODE))
+-                      d->chan_count = 16;
++      /* available logical channels */
++      if (cpu_is_omap15xx()) {
++              d->lch_count = 9;
++      } else {
++              if (d->dev_caps & ENABLE_1510_MODE)
++                      d->lch_count = 9;
+               else
+-                      d->chan_count = 9;
++                      d->lch_count = 16;
+       }
+ 
+       p = dma_plat_info;
+diff --git a/arch/arm/mach-omap2/pdata-quirks.c b/arch/arm/mach-omap2/pdata-quirks.c
+index da310bb779b9..88676fe9b119 100644
+--- a/arch/arm/mach-omap2/pdata-quirks.c
++++ b/arch/arm/mach-omap2/pdata-quirks.c
+@@ -147,7 +147,7 @@ static struct ti_st_plat_data wilink_pdata = {
+       .nshutdown_gpio = 137,
+       .dev_name = "/dev/ttyO1",
+       .flow_cntrl = 1,
+-      .baud_rate = 300000,
++      .baud_rate = 3000000,
+ };
+ 
+ static struct platform_device wl18xx_device = {
+diff --git a/arch/m68k/mm/mcfmmu.c b/arch/m68k/mm/mcfmmu.c
+index 87131cd3bc8f..6d3a50446b21 100644
+--- a/arch/m68k/mm/mcfmmu.c
++++ b/arch/m68k/mm/mcfmmu.c
+@@ -169,7 +169,7 @@ void __init cf_bootmem_alloc(void)
+       max_pfn = max_low_pfn = PFN_DOWN(_ramend);
+       high_memory = (void *)_ramend;
+ 
+-      m68k_virt_to_node_shift = fls(_ramend - _rambase - 1) - 6;
++      m68k_virt_to_node_shift = fls(_ramend - 1) - 6;
+       module_fixup(NULL, __start_fixup, __stop_fixup);
+ 
+       /* setup bootmem data */
+diff --git a/arch/powerpc/include/asm/book3s/64/hash.h b/arch/powerpc/include/asm/book3s/64/hash.h
+index f61cad3de4e6..4c935f7504f7 100644
+--- a/arch/powerpc/include/asm/book3s/64/hash.h
++++ b/arch/powerpc/include/asm/book3s/64/hash.h
+@@ -201,6 +201,10 @@ extern int __meminit hash__vmemmap_create_mapping(unsigned long start,
+                                             unsigned long phys);
+ extern void hash__vmemmap_remove_mapping(unsigned long start,
+                                    unsigned long page_size);
++
++int hash__create_section_mapping(unsigned long start, unsigned long end);
++int hash__remove_section_mapping(unsigned long start, unsigned long end);
++
+ #endif /* !__ASSEMBLY__ */
+ #endif /* __KERNEL__ */
+ #endif /* _ASM_POWERPC_BOOK3S_64_HASH_H */
+diff --git a/arch/powerpc/mm/hash_utils_64.c b/arch/powerpc/mm/hash_utils_64.c
+index 78dabf065ba9..bd666287c5ed 100644
+--- a/arch/powerpc/mm/hash_utils_64.c
++++ b/arch/powerpc/mm/hash_utils_64.c
+@@ -747,7 +747,7 @@ static unsigned long __init htab_get_table_size(void)
+ }
+ 
+ #ifdef CONFIG_MEMORY_HOTPLUG
+-int create_section_mapping(unsigned long start, unsigned long end)
++int hash__create_section_mapping(unsigned long start, unsigned long end)
+ {
+       int rc = htab_bolt_mapping(start, end, __pa(start),
+                                  pgprot_val(PAGE_KERNEL), mmu_linear_psize,
+@@ -761,7 +761,7 @@ int create_section_mapping(unsigned long start, unsigned long end)
+       return rc;
+ }
+ 
+-int remove_section_mapping(unsigned long start, unsigned long end)
++int hash__remove_section_mapping(unsigned long start, unsigned long end)
+ {
+       int rc = htab_remove_mapping(start, end, mmu_linear_psize,
+                                    mmu_kernel_ssize);
+diff --git a/arch/powerpc/mm/pgtable-book3s64.c b/arch/powerpc/mm/pgtable-book3s64.c
+index f4f437cbabf1..0fad7f6742ff 100644
+--- a/arch/powerpc/mm/pgtable-book3s64.c
++++ b/arch/powerpc/mm/pgtable-book3s64.c
+@@ -125,3 +125,21 @@ void mmu_cleanup_all(void)
+       else if (mmu_hash_ops.hpte_clear_all)
+               mmu_hash_ops.hpte_clear_all();
+ }
++
++#ifdef CONFIG_MEMORY_HOTPLUG
++int create_section_mapping(unsigned long start, unsigned long end)
++{
++      if (radix_enabled())
++              return -ENODEV;
++
++      return hash__create_section_mapping(start, end);
++}
++
++int remove_section_mapping(unsigned long start, unsigned long end)
++{
++      if (radix_enabled())
++              return -ENODEV;
++
++      return hash__remove_section_mapping(start, end);
++}
++#endif /* CONFIG_MEMORY_HOTPLUG */
+diff --git a/arch/s390/include/asm/pci_insn.h b/arch/s390/include/asm/pci_insn.h
+index 649eb62c52b3..9e02cb7955c1 100644
+--- a/arch/s390/include/asm/pci_insn.h
++++ b/arch/s390/include/asm/pci_insn.h
+@@ -81,6 +81,6 @@ int zpci_refresh_trans(u64 fn, u64 addr, u64 range);
+ int zpci_load(u64 *data, u64 req, u64 offset);
+ int zpci_store(u64 data, u64 req, u64 offset);
+ int zpci_store_block(const u64 *data, u64 req, u64 offset);
+-void zpci_set_irq_ctrl(u16 ctl, char *unused, u8 isc);
++int zpci_set_irq_ctrl(u16 ctl, char *unused, u8 isc);
+ 
+ #endif
+diff --git a/arch/s390/include/asm/runtime_instr.h b/arch/s390/include/asm/runtime_instr.h
+index 402ad6df4897..c54a9310d814 100644
+--- a/arch/s390/include/asm/runtime_instr.h
++++ b/arch/s390/include/asm/runtime_instr.h
+@@ -85,6 +85,8 @@ static inline void restore_ri_cb(struct runtime_instr_cb *cb_next,
+               load_runtime_instr_cb(&runtime_instr_empty_cb);
+ }
+ 
+-void exit_thread_runtime_instr(void);
++struct task_struct;
++
++void runtime_instr_release(struct task_struct *tsk);
+ 
+ #endif /* _RUNTIME_INSTR_H */
+diff --git a/arch/s390/kernel/process.c b/arch/s390/kernel/process.c
+index 172fe1121d99..8382fc62cde6 100644
+--- a/arch/s390/kernel/process.c
++++ b/arch/s390/kernel/process.c
+@@ -70,8 +70,6 @@ extern void kernel_thread_starter(void);
+  */
+ void exit_thread(struct task_struct *tsk)
+ {
+-      if (tsk == current)
+-              exit_thread_runtime_instr();
+ }
+ 
+ void flush_thread(void)
+@@ -84,6 +82,7 @@ void release_thread(struct task_struct *dead_task)
+ 
+ void arch_release_task_struct(struct task_struct *tsk)
+ {
++      runtime_instr_release(tsk);
+ }
+ 
+ int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
+diff --git a/arch/s390/kernel/runtime_instr.c b/arch/s390/kernel/runtime_instr.c
+index 70cdb03d4acd..fd03a7569e10 100644
+--- a/arch/s390/kernel/runtime_instr.c
++++ b/arch/s390/kernel/runtime_instr.c
+@@ -18,11 +18,24 @@
+ /* empty control block to disable RI by loading it */
+ struct runtime_instr_cb runtime_instr_empty_cb;
+ 
++void runtime_instr_release(struct task_struct *tsk)
++{
++      kfree(tsk->thread.ri_cb);
++}
++
+ static void disable_runtime_instr(void)
+ {
+-      struct pt_regs *regs = task_pt_regs(current);
++      struct task_struct *task = current;
++      struct pt_regs *regs;
+ 
++      if (!task->thread.ri_cb)
++              return;
++      regs = task_pt_regs(task);
++      preempt_disable();
+       load_runtime_instr_cb(&runtime_instr_empty_cb);
++      kfree(task->thread.ri_cb);
++      task->thread.ri_cb = NULL;
++      preempt_enable();
+ 
+       /*
+        * Make sure the RI bit is deleted from the PSW. If the user did not
+@@ -43,19 +56,6 @@ static void init_runtime_instr_cb(struct runtime_instr_cb *cb)
+       cb->valid = 1;
+ }
+ 
+-void exit_thread_runtime_instr(void)
+-{
+-      struct task_struct *task = current;
+-
+-      preempt_disable();
+-      if (!task->thread.ri_cb)
+-              return;
+-      disable_runtime_instr();
+-      kfree(task->thread.ri_cb);
+-      task->thread.ri_cb = NULL;
+-      preempt_enable();
+-}
+-
+ SYSCALL_DEFINE1(s390_runtime_instr, int, command)
+ {
+       struct runtime_instr_cb *cb;
+@@ -64,7 +64,7 @@ SYSCALL_DEFINE1(s390_runtime_instr, int, command)
+               return -EOPNOTSUPP;
+ 
+       if (command == S390_RUNTIME_INSTR_STOP) {
+-              exit_thread_runtime_instr();
++              disable_runtime_instr();
+               return 0;
+       }
+ 
+diff --git a/arch/s390/pci/pci.c b/arch/s390/pci/pci.c
+index 15ffc19c8c0c..03a1d5976ff5 100644
+--- a/arch/s390/pci/pci.c
++++ b/arch/s390/pci/pci.c
+@@ -354,7 +354,8 @@ static void zpci_irq_handler(struct airq_struct *airq)
+                               /* End of second scan with interrupts on. */
+                               break;
+                       /* First scan complete, reenable interrupts. */
+-                      zpci_set_irq_ctrl(SIC_IRQ_MODE_SINGLE, NULL, PCI_ISC);
++                      if (zpci_set_irq_ctrl(SIC_IRQ_MODE_SINGLE, NULL, PCI_ISC))
++                              break;
+                       si = 0;
+                       continue;
+               }
+@@ -928,7 +929,7 @@ static int __init pci_base_init(void)
+       if (!s390_pci_probe)
+               return 0;
+ 
+-      if (!test_facility(69) || !test_facility(71) || !test_facility(72))
++      if (!test_facility(69) || !test_facility(71))
+               return 0;
+ 
+       rc = zpci_debug_init();
+diff --git a/arch/s390/pci/pci_insn.c b/arch/s390/pci/pci_insn.c
+index fa8d7d4b9751..248146dcfce3 100644
+--- a/arch/s390/pci/pci_insn.c
++++ b/arch/s390/pci/pci_insn.c
+@@ -7,6 +7,7 @@
+ #include <linux/export.h>
+ #include <linux/errno.h>
+ #include <linux/delay.h>
++#include <asm/facility.h>
+ #include <asm/pci_insn.h>
+ #include <asm/pci_debug.h>
+ #include <asm/processor.h>
+@@ -91,11 +92,14 @@ int zpci_refresh_trans(u64 fn, u64 addr, u64 range)
+ }
+ 
+ /* Set Interruption Controls */
+-void zpci_set_irq_ctrl(u16 ctl, char *unused, u8 isc)
++int zpci_set_irq_ctrl(u16 ctl, char *unused, u8 isc)
+ {
++      if (!test_facility(72))
++              return -EIO;
+       asm volatile (
+               "       .insn   rsy,0xeb00000000d1,%[ctl],%[isc],%[u]\n"
+               : : [ctl] "d" (ctl), [isc] "d" (isc << 27), [u] "Q" (*unused));
++      return 0;
+ }
+ 
+ /* PCI Load */
+diff --git a/arch/x86/events/intel/ds.c b/arch/x86/events/intel/ds.c
+index be202390bbd3..9dfeeeca0ea8 100644
+--- a/arch/x86/events/intel/ds.c
++++ b/arch/x86/events/intel/ds.c
+@@ -1389,9 +1389,13 @@ static void intel_pmu_drain_pebs_nhm(struct pt_regs *iregs)
+                       continue;
+ 
+               /* log dropped samples number */
+-              if (error[bit])
++              if (error[bit]) {
+                       perf_log_lost_samples(event, error[bit]);
+ 
++                      if (perf_event_account_interrupt(event))
++                              x86_pmu_stop(event, 0);
++              }
++
+               if (counts[bit]) {
+                       __intel_pmu_pebs_event(event, iregs, base,
+                                              top, bit, counts[bit]);
+diff --git a/arch/x86/include/asm/syscalls.h b/arch/x86/include/asm/syscalls.h
+index 91dfcafe27a6..bad25bb80679 100644
+--- a/arch/x86/include/asm/syscalls.h
++++ b/arch/x86/include/asm/syscalls.h
+@@ -21,7 +21,7 @@ asmlinkage long sys_ioperm(unsigned long, unsigned long, int);
+ asmlinkage long sys_iopl(unsigned int);
+ 
+ /* kernel/ldt.c */
+-asmlinkage int sys_modify_ldt(int, void __user *, unsigned long);
++asmlinkage long sys_modify_ldt(int, void __user *, unsigned long);
+ 
+ /* kernel/signal.c */
+ asmlinkage long sys_rt_sigreturn(void);
+diff --git a/arch/x86/kernel/fpu/xstate.c b/arch/x86/kernel/fpu/xstate.c
+index 095ef7ddd6ae..abfbb61b18b8 100644
+--- a/arch/x86/kernel/fpu/xstate.c
++++ b/arch/x86/kernel/fpu/xstate.c
+@@ -1077,6 +1077,7 @@ int copyin_to_xsaves(const void *kbuf, const void __user *ubuf,
+        * Add back in the features that came in from userspace:
+        */
+       xsave->header.xfeatures |= xfeatures;
++      xsave->header.xcomp_bv = XCOMP_BV_COMPACTED_FORMAT | xsave->header.xfeatures;
+ 
+       return 0;
+ }
+diff --git a/arch/x86/kernel/kprobes/ftrace.c b/arch/x86/kernel/kprobes/ftrace.c
+index 5f8f0b3cc674..2c0b0b645a74 100644
+--- a/arch/x86/kernel/kprobes/ftrace.c
++++ b/arch/x86/kernel/kprobes/ftrace.c
+@@ -26,7 +26,7 @@
+ #include "common.h"
+ 
+ static nokprobe_inline
+-int __skip_singlestep(struct kprobe *p, struct pt_regs *regs,
++void __skip_singlestep(struct kprobe *p, struct pt_regs *regs,
+                     struct kprobe_ctlblk *kcb, unsigned long orig_ip)
+ {
+       /*
+@@ -41,20 +41,21 @@ int __skip_singlestep(struct kprobe *p, struct pt_regs *regs,
+       __this_cpu_write(current_kprobe, NULL);
+       if (orig_ip)
+               regs->ip = orig_ip;
+-      return 1;
+ }
+ 
+ int skip_singlestep(struct kprobe *p, struct pt_regs *regs,
+                   struct kprobe_ctlblk *kcb)
+ {
+-      if (kprobe_ftrace(p))
+-              return __skip_singlestep(p, regs, kcb, 0);
+-      else
+-              return 0;
++      if (kprobe_ftrace(p)) {
++              __skip_singlestep(p, regs, kcb, 0);
++              preempt_enable_no_resched();
++              return 1;
++      }
++      return 0;
+ }
+ NOKPROBE_SYMBOL(skip_singlestep);
+ 
+-/* Ftrace callback handler for kprobes */
++/* Ftrace callback handler for kprobes -- called under preempt disabled */
+ void kprobe_ftrace_handler(unsigned long ip, unsigned long parent_ip,
+                          struct ftrace_ops *ops, struct pt_regs *regs)
+ {
+@@ -77,13 +78,17 @@ void kprobe_ftrace_handler(unsigned long ip, unsigned long parent_ip,
+               /* Kprobe handler expects regs->ip = ip + 1 as breakpoint hit */
+               regs->ip = ip + sizeof(kprobe_opcode_t);
+ 
++              /* To emulate trap based kprobes, preempt_disable here */
++              preempt_disable();
+               __this_cpu_write(current_kprobe, p);
+               kcb->kprobe_status = KPROBE_HIT_ACTIVE;
+-              if (!p->pre_handler || !p->pre_handler(p, regs))
++              if (!p->pre_handler || !p->pre_handler(p, regs)) {
+                       __skip_singlestep(p, regs, kcb, orig_ip);
++                      preempt_enable_no_resched();
++              }
+               /*
+                * If pre_handler returns !0, it sets regs->ip and
+-               * resets current kprobe.
++               * resets current kprobe, and keeps the preempt count +1.
+                */
+       }
+ end:
+diff --git a/arch/x86/kernel/ldt.c b/arch/x86/kernel/ldt.c
+index 6707039b9032..5f70014ca602 100644
+--- a/arch/x86/kernel/ldt.c
++++ b/arch/x86/kernel/ldt.c
+@@ -12,6 +12,7 @@
+ #include <linux/string.h>
+ #include <linux/mm.h>
+ #include <linux/smp.h>
++#include <linux/syscalls.h>
+ #include <linux/slab.h>
+ #include <linux/vmalloc.h>
+ #include <linux/uaccess.h>
+@@ -271,8 +272,8 @@ static int write_ldt(void __user *ptr, unsigned long bytecount, int oldmode)
+       return error;
+ }
+ 
+-asmlinkage int sys_modify_ldt(int func, void __user *ptr,
+-                            unsigned long bytecount)
++SYSCALL_DEFINE3(modify_ldt, int , func , void __user * , ptr ,
++              unsigned long , bytecount)
+ {
+       int ret = -ENOSYS;
+ 
+@@ -290,5 +291,14 @@ asmlinkage int sys_modify_ldt(int func, void __user *ptr,
+               ret = write_ldt(ptr, bytecount, 0);
+               break;
+       }
+-      return ret;
++      /*
++       * The SYSCALL_DEFINE() macros give us an 'unsigned long'
++       * return type, but the ABI for sys_modify_ldt() expects
++       * 'int'.  This cast gives us an int-sized value in %rax
++       * for the return code.  The 'unsigned' is necessary so
++       * the compiler does not try to sign-extend the negative
++       * return codes into the high half of the register when
++       * taking the value from int->long.
++       */
++      return (unsigned int)ret;
+ }
+diff --git a/arch/x86/um/ldt.c b/arch/x86/um/ldt.c
+index 836a1eb5df43..3ee234b6234d 100644
+--- a/arch/x86/um/ldt.c
++++ b/arch/x86/um/ldt.c
+@@ -6,6 +6,7 @@
+ #include <linux/mm.h>
+ #include <linux/sched.h>
+ #include <linux/slab.h>
++#include <linux/syscalls.h>
+ #include <linux/uaccess.h>
+ #include <asm/unistd.h>
+ #include <os.h>
+@@ -369,7 +370,9 @@ void free_ldt(struct mm_context *mm)
+       mm->arch.ldt.entry_count = 0;
+ }
+ 
+-int sys_modify_ldt(int func, void __user *ptr, unsigned long bytecount)
++SYSCALL_DEFINE3(modify_ldt, int , func , void __user * , ptr ,
++              unsigned long , bytecount)
+ {
+-      return do_modify_ldt_skas(func, ptr, bytecount);
++      /* See non-um modify_ldt() for why we do this cast */
++      return (unsigned int)do_modify_ldt_skas(func, ptr, bytecount);
+ }
+diff --git a/drivers/crypto/caam/intern.h b/drivers/crypto/caam/intern.h
+index 5d4c05074a5c..e2bcacc1a921 100644
+--- a/drivers/crypto/caam/intern.h
++++ b/drivers/crypto/caam/intern.h
+@@ -41,6 +41,7 @@ struct caam_drv_private_jr {
+       struct device           *dev;
+       int ridx;
+       struct caam_job_ring __iomem *rregs;    /* JobR's register space */
++      struct tasklet_struct irqtask;
+       int irq;                        /* One per queue */
+ 
+       /* Number of scatterlist crypt transforms active on the JobR */
+diff --git a/drivers/crypto/caam/jr.c b/drivers/crypto/caam/jr.c
+index 757c27f9953d..9e7f28122bb7 100644
+--- a/drivers/crypto/caam/jr.c
++++ b/drivers/crypto/caam/jr.c
+@@ -73,6 +73,8 @@ static int caam_jr_shutdown(struct device *dev)
+ 
+       ret = caam_reset_hw_jr(dev);
+ 
++      tasklet_kill(&jrp->irqtask);
++
+       /* Release interrupt */
+       free_irq(jrp->irq, dev);
+ 
+@@ -128,7 +130,7 @@ static irqreturn_t caam_jr_interrupt(int irq, void *st_dev)
+ 
+       /*
+        * Check the output ring for ready responses, kick
+-       * the threaded irq if jobs done.
++       * tasklet if jobs done.
+        */
+       irqstate = rd_reg32(&jrp->rregs->jrintstatus);
+       if (!irqstate)
+@@ -150,13 +152,18 @@ static irqreturn_t caam_jr_interrupt(int irq, void *st_dev)
+       /* Have valid interrupt at this point, just ACK and trigger */
+       wr_reg32(&jrp->rregs->jrintstatus, irqstate);
+ 
+-      return IRQ_WAKE_THREAD;
++      preempt_disable();
++      tasklet_schedule(&jrp->irqtask);
++      preempt_enable();
++
++      return IRQ_HANDLED;
+ }
+ 
+-static irqreturn_t caam_jr_threadirq(int irq, void *st_dev)
++/* Deferred service handler, run as interrupt-fired tasklet */
++static void caam_jr_dequeue(unsigned long devarg)
+ {
+       int hw_idx, sw_idx, i, head, tail;
+-      struct device *dev = st_dev;
++      struct device *dev = (struct device *)devarg;
+       struct caam_drv_private_jr *jrp = dev_get_drvdata(dev);
+       void (*usercall)(struct device *dev, u32 *desc, u32 status, void *arg);
+       u32 *userdesc, userstatus;
+@@ -230,8 +237,6 @@ static irqreturn_t caam_jr_threadirq(int irq, void *st_dev)
+ 
+       /* reenable / unmask IRQs */
+       clrsetbits_32(&jrp->rregs->rconfig_lo, JRCFG_IMSK, 0);
+-
+-      return IRQ_HANDLED;
+ }
+ 
+ /**
+@@ -389,10 +394,11 @@ static int caam_jr_init(struct device *dev)
+ 
+       jrp = dev_get_drvdata(dev);
+ 
++      tasklet_init(&jrp->irqtask, caam_jr_dequeue, (unsigned long)dev);
++
+       /* Connect job ring interrupt handler. */
+-      error = request_threaded_irq(jrp->irq, caam_jr_interrupt,
+-                                   caam_jr_threadirq, IRQF_SHARED,
+-                                   dev_name(dev), dev);
++      error = request_irq(jrp->irq, caam_jr_interrupt, IRQF_SHARED,
++                          dev_name(dev), dev);
+       if (error) {
+               dev_err(dev, "can't connect JobR %d interrupt (%d)\n",
+                       jrp->ridx, jrp->irq);
+@@ -454,6 +460,7 @@ static int caam_jr_init(struct device *dev)
+ out_free_irq:
+       free_irq(jrp->irq, dev);
+ out_kill_deq:
++      tasklet_kill(&jrp->irqtask);
+       return error;
+ }
+ 
+diff --git a/drivers/dma-buf/fence.c b/drivers/dma-buf/fence.c
+index 4d51f9e83fa8..04bf29808200 100644
+--- a/drivers/dma-buf/fence.c
++++ b/drivers/dma-buf/fence.c
+@@ -280,6 +280,31 @@ int fence_add_callback(struct fence *fence, struct fence_cb *cb,
+ }
+ EXPORT_SYMBOL(fence_add_callback);
+ 
++/**
++ * fence_get_status - returns the status upon completion
++ * @fence: [in]       the fence to query
++ *
++ * This wraps fence_get_status_locked() to return the error status
++ * condition on a signaled fence. See fence_get_status_locked() for more
++ * details.
++ *
++ * Returns 0 if the fence has not yet been signaled, 1 if the fence has
++ * been signaled without an error condition, or a negative error code
++ * if the fence has been completed in err.
++ */
++int fence_get_status(struct fence *fence)
++{
++      unsigned long flags;
++      int status;
++
++      spin_lock_irqsave(fence->lock, flags);
++      status = fence_get_status_locked(fence);
++      spin_unlock_irqrestore(fence->lock, flags);
++
++      return status;
++}
++EXPORT_SYMBOL(fence_get_status);
++
+ /**
+  * fence_remove_callback - remove a callback from the signaling list
+  * @fence:    [in]    the fence to wait on
+@@ -526,6 +551,7 @@ fence_init(struct fence *fence, const struct fence_ops *ops,
+       fence->context = context;
+       fence->seqno = seqno;
+       fence->flags = 0UL;
++      fence->error = 0;
+ 
+       trace_fence_init(fence);
+ }
+diff --git a/drivers/dma-buf/sw_sync.c b/drivers/dma-buf/sw_sync.c
+index 62e8e6dc7953..4f3511415b29 100644
+--- a/drivers/dma-buf/sw_sync.c
++++ b/drivers/dma-buf/sw_sync.c
+@@ -96,9 +96,9 @@ struct sync_timeline *sync_timeline_create(const char *name)
+       obj->context = fence_context_alloc(1);
+       strlcpy(obj->name, name, sizeof(obj->name));
+ 
+-      INIT_LIST_HEAD(&obj->child_list_head);
+-      INIT_LIST_HEAD(&obj->active_list_head);
+-      spin_lock_init(&obj->child_list_lock);
++      obj->pt_tree = RB_ROOT;
++      INIT_LIST_HEAD(&obj->pt_list);
++      spin_lock_init(&obj->lock);
+ 
+       sync_timeline_debug_add(obj);
+ 
+@@ -125,68 +125,6 @@ static void sync_timeline_put(struct sync_timeline *obj)
+       kref_put(&obj->kref, sync_timeline_free);
+ }
+ 
+-/**
+- * sync_timeline_signal() - signal a status change on a sync_timeline
+- * @obj:      sync_timeline to signal
+- * @inc:      num to increment on timeline->value
+- *
+- * A sync implementation should call this any time one of it's fences
+- * has signaled or has an error condition.
+- */
+-static void sync_timeline_signal(struct sync_timeline *obj, unsigned int inc)
+-{
+-      unsigned long flags;
+-      struct sync_pt *pt, *next;
+-
+-      trace_sync_timeline(obj);
+-
+-      spin_lock_irqsave(&obj->child_list_lock, flags);
+-
+-      obj->value += inc;
+-
+-      list_for_each_entry_safe(pt, next, &obj->active_list_head,
+-                               active_list) {
+-              if (fence_is_signaled_locked(&pt->base))
+-                      list_del_init(&pt->active_list);
+-      }
+-
+-      spin_unlock_irqrestore(&obj->child_list_lock, flags);
+-}
+-
+-/**
+- * sync_pt_create() - creates a sync pt
+- * @parent:   fence's parent sync_timeline
+- * @size:     size to allocate for this pt
+- * @inc:      value of the fence
+- *
+- * Creates a new sync_pt as a child of @parent.  @size bytes will be
+- * allocated allowing for implementation specific data to be kept after
+- * the generic sync_timeline struct. Returns the sync_pt object or
+- * NULL in case of error.
+- */
+-static struct sync_pt *sync_pt_create(struct sync_timeline *obj, int size,
+-                           unsigned int value)
+-{
+-      unsigned long flags;
+-      struct sync_pt *pt;
+-
+-      if (size < sizeof(*pt))
+-              return NULL;
+-
+-      pt = kzalloc(size, GFP_KERNEL);
+-      if (!pt)
+-              return NULL;
+-
+-      spin_lock_irqsave(&obj->child_list_lock, flags);
+-      sync_timeline_get(obj);
+-      fence_init(&pt->base, &timeline_fence_ops, &obj->child_list_lock,
+-                 obj->context, value);
+-      list_add_tail(&pt->child_list, &obj->child_list_head);
+-      INIT_LIST_HEAD(&pt->active_list);
+-      spin_unlock_irqrestore(&obj->child_list_lock, flags);
+-      return pt;
+-}
+-
+ static const char *timeline_fence_get_driver_name(struct fence *fence)
+ {
+       return "sw_sync";
+@@ -203,13 +141,17 @@ static void timeline_fence_release(struct fence *fence)
+ {
+       struct sync_pt *pt = fence_to_sync_pt(fence);
+       struct sync_timeline *parent = fence_parent(fence);
+-      unsigned long flags;
+ 
+-      spin_lock_irqsave(fence->lock, flags);
+-      list_del(&pt->child_list);
+-      if (!list_empty(&pt->active_list))
+-              list_del(&pt->active_list);
+-      spin_unlock_irqrestore(fence->lock, flags);
++      if (!list_empty(&pt->link)) {
++              unsigned long flags;
++
++              spin_lock_irqsave(fence->lock, flags);
++              if (!list_empty(&pt->link)) {
++                      list_del(&pt->link);
++                      rb_erase(&pt->node, &parent->pt_tree);
++              }
++              spin_unlock_irqrestore(fence->lock, flags);
++      }
+ 
+       sync_timeline_put(parent);
+       fence_free(fence);
+@@ -219,18 +161,11 @@ static bool timeline_fence_signaled(struct fence *fence)
+ {
+       struct sync_timeline *parent = fence_parent(fence);
+ 
+-      return (fence->seqno > parent->value) ? false : true;
++      return !__fence_is_later(fence->seqno, parent->value);
+ }
+ 
+ static bool timeline_fence_enable_signaling(struct fence *fence)
+ {
+-      struct sync_pt *pt = fence_to_sync_pt(fence);
+-      struct sync_timeline *parent = fence_parent(fence);
+-
+-      if (timeline_fence_signaled(fence))
+-              return false;
+-
+-      list_add_tail(&pt->active_list, &parent->active_list_head);
+       return true;
+ }
+ 
+@@ -259,6 +194,107 @@ static const struct fence_ops timeline_fence_ops = {
+       .timeline_value_str = timeline_fence_timeline_value_str,
+ };
+ 
++/**
++ * sync_timeline_signal() - signal a status change on a sync_timeline
++ * @obj:      sync_timeline to signal
++ * @inc:      num to increment on timeline->value
++ *
++ * A sync implementation should call this any time one of its fences
++ * has signaled or has an error condition.
++ */
++static void sync_timeline_signal(struct sync_timeline *obj, unsigned int inc)
++{
++      struct sync_pt *pt, *next;
++
++      trace_sync_timeline(obj);
++
++      spin_lock_irq(&obj->lock);
++
++      obj->value += inc;
++
++      list_for_each_entry_safe(pt, next, &obj->pt_list, link) {
++              if (!timeline_fence_signaled(&pt->base))
++                      break;
++
++              list_del_init(&pt->link);
++              rb_erase(&pt->node, &obj->pt_tree);
++
++              /*
++               * A signal callback may release the last reference to this
++               * fence, causing it to be freed. That operation has to be
++               * last to avoid a use after free inside this loop, and must
++               * be after we remove the fence from the timeline in order to
++               * prevent deadlocking on timeline->lock inside
++               * timeline_fence_release().
++               */
++              fence_signal_locked(&pt->base);
++      }
++
++      spin_unlock_irq(&obj->lock);
++}
++
++/**
++ * sync_pt_create() - creates a sync pt
++ * @parent:   fence's parent sync_timeline
++ * @inc:      value of the fence
++ *
++ * Creates a new sync_pt as a child of @parent.  @size bytes will be
++ * allocated allowing for implementation specific data to be kept after
++ * the generic sync_timeline struct. Returns the sync_pt object or
++ * NULL in case of error.
++ */
++static struct sync_pt *sync_pt_create(struct sync_timeline *obj,
++                                    unsigned int value)
++{
++      struct sync_pt *pt;
++
++      pt = kzalloc(sizeof(*pt), GFP_KERNEL);
++      if (!pt)
++              return NULL;
++
++      sync_timeline_get(obj);
++      fence_init(&pt->base, &timeline_fence_ops, &obj->lock,
++                 obj->context, value);
++      INIT_LIST_HEAD(&pt->link);
++
++      spin_lock_irq(&obj->lock);
++      if (!fence_is_signaled_locked(&pt->base)) {
++              struct rb_node **p = &obj->pt_tree.rb_node;
++              struct rb_node *parent = NULL;
++
++              while (*p) {
++                      struct sync_pt *other;
++                      int cmp;
++
++                      parent = *p;
++                      other = rb_entry(parent, typeof(*pt), node);
++                      cmp = value - other->base.seqno;
++                      if (cmp > 0) {
++                              p = &parent->rb_right;
++                      } else if (cmp < 0) {
++                              p = &parent->rb_left;
++                      } else {
++                              if (fence_get_rcu(&other->base)) {
++                                      fence_put(&pt->base);
++                                      pt = other;
++                                      goto unlock;
++                              }
++                              p = &parent->rb_left;
++                      }
++              }
++              rb_link_node(&pt->node, parent, p);
++              rb_insert_color(&pt->node, &obj->pt_tree);
++
++              parent = rb_next(&pt->node);
++              list_add_tail(&pt->link,
++                            parent ? &rb_entry(parent, typeof(*pt), node)->link : &obj->pt_list);
++      }
++unlock:
++      spin_unlock_irq(&obj->lock);
++
++      return pt;
++}
++
+ /*
+  * *WARNING*
+  *
+@@ -285,8 +321,16 @@ static int sw_sync_debugfs_open(struct inode *inode, struct file *file)
+ static int sw_sync_debugfs_release(struct inode *inode, struct file *file)
+ {
+       struct sync_timeline *obj = file->private_data;
++      struct sync_pt *pt, *next;
++
++      spin_lock_irq(&obj->lock);
++
++      list_for_each_entry_safe(pt, next, &obj->pt_list, link) {
++              fence_set_error(&pt->base, -ENOENT);
++              fence_signal_locked(&pt->base);
++      }
+ 
+-      smp_wmb();
++      spin_unlock_irq(&obj->lock);
+ 
+       sync_timeline_put(obj);
+       return 0;
+@@ -309,7 +353,7 @@ static long sw_sync_ioctl_create_fence(struct sync_timeline *obj,
+               goto err;
+       }
+ 
+-      pt = sync_pt_create(obj, sizeof(*pt), data.value);
++      pt = sync_pt_create(obj, data.value);
+       if (!pt) {
+               err = -ENOMEM;
+               goto err;
+@@ -345,6 +389,11 @@ static long sw_sync_ioctl_inc(struct sync_timeline *obj, unsigned long arg)
+       if (copy_from_user(&value, (void __user *)arg, sizeof(value)))
+               return -EFAULT;
+ 
++      while (value > INT_MAX)  {
++              sync_timeline_signal(obj, INT_MAX);
++              value -= INT_MAX;
++      }
++
+       sync_timeline_signal(obj, value);
+ 
+       return 0;
+diff --git a/drivers/dma-buf/sync_debug.c b/drivers/dma-buf/sync_debug.c
+index 2dd4c3db6caa..858263dbecd4 100644
+--- a/drivers/dma-buf/sync_debug.c
++++ b/drivers/dma-buf/sync_debug.c
+@@ -62,29 +62,29 @@ void sync_file_debug_remove(struct sync_file *sync_file)
+ 
+ static const char *sync_status_str(int status)
+ {
+-      if (status == 0)
+-              return "signaled";
++      if (status < 0)
++              return "error";
+ 
+       if (status > 0)
+-              return "active";
++              return "signaled";
+ 
+-      return "error";
++      return "active";
+ }
+ 
+-static void sync_print_fence(struct seq_file *s, struct fence *fence, bool show)
++static void sync_print_fence(struct seq_file *s,
++                           struct fence *fence, bool show)
+ {
+-      int status = 1;
+       struct sync_timeline *parent = fence_parent(fence);
++      int status;
+ 
+-      if (fence_is_signaled_locked(fence))
+-              status = fence->status;
++      status = fence_get_status_locked(fence);
+ 
+       seq_printf(s, "  %s%sfence %s",
+                  show ? parent->name : "",
+                  show ? "_" : "",
+                  sync_status_str(status));
+ 
+-      if (status <= 0) {
++      if (status) {
+               struct timespec64 ts64 =
+                       ktime_to_timespec64(fence->timestamp);
+ 
+@@ -116,17 +116,15 @@ static void sync_print_fence(struct seq_file *s, struct fence *fence, bool show)
+ static void sync_print_obj(struct seq_file *s, struct sync_timeline *obj)
+ {
+       struct list_head *pos;
+-      unsigned long flags;
+ 
+       seq_printf(s, "%s: %d\n", obj->name, obj->value);
+ 
+-      spin_lock_irqsave(&obj->child_list_lock, flags);
+-      list_for_each(pos, &obj->child_list_head) {
+-              struct sync_pt *pt =
+-                      container_of(pos, struct sync_pt, child_list);
++      spin_lock_irq(&obj->lock);
++      list_for_each(pos, &obj->pt_list) {
++              struct sync_pt *pt = container_of(pos, struct sync_pt, link);
+               sync_print_fence(s, &pt->base, false);
+       }
+-      spin_unlock_irqrestore(&obj->child_list_lock, flags);
++      spin_unlock_irq(&obj->lock);
+ }
+ 
+ static void sync_print_sync_file(struct seq_file *s,
+@@ -135,7 +133,7 @@ static void sync_print_sync_file(struct seq_file *s,
+       int i;
+ 
+       seq_printf(s, "[%p] %s: %s\n", sync_file, sync_file->name,
+-                 sync_status_str(!fence_is_signaled(sync_file->fence)));
++                 sync_status_str(fence_get_status(sync_file->fence)));
+ 
+       if (fence_is_array(sync_file->fence)) {
+               struct fence_array *array = to_fence_array(sync_file->fence);
+@@ -149,12 +147,11 @@ static void sync_print_sync_file(struct seq_file *s,
+ 
+ static int sync_debugfs_show(struct seq_file *s, void *unused)
+ {
+-      unsigned long flags;
+       struct list_head *pos;
+ 
+       seq_puts(s, "objs:\n--------------\n");
+ 
+-      spin_lock_irqsave(&sync_timeline_list_lock, flags);
++      spin_lock_irq(&sync_timeline_list_lock);
+       list_for_each(pos, &sync_timeline_list_head) {
+               struct sync_timeline *obj =
+                       container_of(pos, struct sync_timeline,
+@@ -163,11 +160,11 @@ static int sync_debugfs_show(struct seq_file *s, void *unused)
+               sync_print_obj(s, obj);
+               seq_puts(s, "\n");
+       }
+-      spin_unlock_irqrestore(&sync_timeline_list_lock, flags);
++      spin_unlock_irq(&sync_timeline_list_lock);
+ 
+       seq_puts(s, "fences:\n--------------\n");
+ 
+-      spin_lock_irqsave(&sync_file_list_lock, flags);
++      spin_lock_irq(&sync_file_list_lock);
+       list_for_each(pos, &sync_file_list_head) {
+               struct sync_file *sync_file =
+                       container_of(pos, struct sync_file, sync_file_list);
+@@ -175,7 +172,7 @@ static int sync_debugfs_show(struct seq_file *s, void *unused)
+               sync_print_sync_file(s, sync_file);
+               seq_puts(s, "\n");
+       }
+-      spin_unlock_irqrestore(&sync_file_list_lock, flags);
++      spin_unlock_irq(&sync_file_list_lock);
+       return 0;
+ }
+ 
+diff --git a/drivers/dma-buf/sync_debug.h b/drivers/dma-buf/sync_debug.h
+index d269aa6783aa..9615dc0385b5 100644
+--- a/drivers/dma-buf/sync_debug.h
++++ b/drivers/dma-buf/sync_debug.h
+@@ -14,6 +14,7 @@
+ #define _LINUX_SYNC_H
+ 
+ #include <linux/list.h>
++#include <linux/rbtree.h>
+ #include <linux/spinlock.h>
+ #include <linux/fence.h>
+ 
+@@ -24,43 +25,41 @@
+  * struct sync_timeline - sync object
+  * @kref:             reference count on fence.
+  * @name:             name of the sync_timeline. Useful for debugging
+- * @child_list_head:  list of children sync_pts for this sync_timeline
+- * @child_list_lock:  lock protecting @child_list_head and fence.status
+- * @active_list_head: list of active (unsignaled/errored) sync_pts
++ * @lock:             lock protecting @pt_list and @value
++ * @pt_tree:          rbtree of active (unsignaled/errored) sync_pts
++ * @pt_list:          list of active (unsignaled/errored) sync_pts
+  * @sync_timeline_list:       membership in global sync_timeline_list
+  */
+ struct sync_timeline {
+       struct kref             kref;
+       char                    name[32];
+ 
+-      /* protected by child_list_lock */
++      /* protected by lock */
+       u64                     context;
+       int                     value;
+ 
+-      struct list_head        child_list_head;
+-      spinlock_t              child_list_lock;
+-
+-      struct list_head        active_list_head;
++      struct rb_root          pt_tree;
++      struct list_head        pt_list;
++      spinlock_t              lock;
+ 
+       struct list_head        sync_timeline_list;
+ };
+ 
+ static inline struct sync_timeline *fence_parent(struct fence *fence)
+ {
+-      return container_of(fence->lock, struct sync_timeline,
+-                          child_list_lock);
++      return container_of(fence->lock, struct sync_timeline, lock);
+ }
+ 
+ /**
+  * struct sync_pt - sync_pt object
+  * @base: base fence object
+- * @child_list: sync timeline child's list
+- * @active_list: sync timeline active child's list
++ * @link: link on the sync timeline's list
++ * @node: node in the sync timeline's tree
+  */
+ struct sync_pt {
+       struct fence base;
+-      struct list_head child_list;
+-      struct list_head active_list;
++      struct list_head link;
++      struct rb_node node;
+ };
+ 
+ #ifdef CONFIG_SW_SYNC
+diff --git a/drivers/dma-buf/sync_file.c b/drivers/dma-buf/sync_file.c
+index b29a9e817320..f0c374d6ab40 100644
+--- a/drivers/dma-buf/sync_file.c
++++ b/drivers/dma-buf/sync_file.c
+@@ -67,9 +67,10 @@ static void fence_check_cb_func(struct fence *f, struct fence_cb *cb)
+  * sync_file_create() - creates a sync file
+  * @fence:    fence to add to the sync_fence
+  *
+- * Creates a sync_file containg @fence. Once this is called, the sync_file
+- * takes ownership of @fence. The sync_file can be released with
+- * fput(sync_file->file). Returns the sync_file or NULL in case of error.
++ * Creates a sync_file containing @fence. This function acquires an additional
++ * reference of @fence for the newly-created &sync_file, if it succeeds. The
++ * sync_file can be released with fput(sync_file->file). Returns the
++ * sync_file or NULL in case of error.
+  */
+ struct sync_file *sync_file_create(struct fence *fence)
+ {
+@@ -79,7 +80,7 @@ struct sync_file *sync_file_create(struct fence *fence)
+       if (!sync_file)
+               return NULL;
+ 
+-      sync_file->fence = fence;
++      sync_file->fence = fence_get(fence);
+ 
+       snprintf(sync_file->name, sizeof(sync_file->name), "%s-%s%llu-%d",
+                fence->ops->get_driver_name(fence),
+@@ -90,13 +91,6 @@ struct sync_file *sync_file_create(struct fence *fence)
+ }
+ EXPORT_SYMBOL(sync_file_create);
+ 
+-/**
+- * sync_file_fdget() - get a sync_file from an fd
+- * @fd:               fd referencing a fence
+- *
+- * Ensures @fd references a valid sync_file, increments the refcount of the
+- * backing file. Returns the sync_file or NULL in case of error.
+- */
+ static struct sync_file *sync_file_fdget(int fd)
+ {
+       struct file *file = fget(fd);
+@@ -377,10 +371,8 @@ static void sync_fill_fence_info(struct fence *fence,
+               sizeof(info->obj_name));
+       strlcpy(info->driver_name, fence->ops->get_driver_name(fence),
+               sizeof(info->driver_name));
+-      if (fence_is_signaled(fence))
+-              info->status = fence->status >= 0 ? 1 : fence->status;
+-      else
+-              info->status = 0;
++
++      info->status = fence_get_status(fence);
+       info->timestamp_ns = ktime_to_ns(fence->timestamp);
+ }
+ 
+diff --git a/drivers/dma/pl330.c b/drivers/dma/pl330.c
+index 9f3dbc8c63d2..fb2e7476d96b 100644
+--- a/drivers/dma/pl330.c
++++ b/drivers/dma/pl330.c
+@@ -1694,7 +1694,6 @@ static bool _chan_ns(const struct pl330_dmac *pl330, int i)
+ static struct pl330_thread *pl330_request_channel(struct pl330_dmac *pl330)
+ {
+       struct pl330_thread *thrd = NULL;
+-      unsigned long flags;
+       int chans, i;
+ 
+       if (pl330->state == DYING)
+@@ -1702,8 +1701,6 @@ static struct pl330_thread *pl330_request_channel(struct pl330_dmac *pl330)
+ 
+       chans = pl330->pcfg.num_chan;
+ 
+-      spin_lock_irqsave(&pl330->lock, flags);
+-
+       for (i = 0; i < chans; i++) {
+               thrd = &pl330->channels[i];
+               if ((thrd->free) && (!_manager_ns(thrd) ||
+@@ -1721,8 +1718,6 @@ static struct pl330_thread *pl330_request_channel(struct pl330_dmac *pl330)
+               thrd = NULL;
+       }
+ 
+-      spin_unlock_irqrestore(&pl330->lock, flags);
+-
+       return thrd;
+ }
+ 
+@@ -1740,7 +1735,6 @@ static inline void _free_event(struct pl330_thread *thrd, int ev)
+ static void pl330_release_channel(struct pl330_thread *thrd)
+ {
+       struct pl330_dmac *pl330;
+-      unsigned long flags;
+ 
+       if (!thrd || thrd->free)
+               return;
+@@ -1752,10 +1746,8 @@ static void pl330_release_channel(struct pl330_thread *thrd)
+ 
+       pl330 = thrd->dmac;
+ 
+-      spin_lock_irqsave(&pl330->lock, flags);
+       _free_event(thrd, thrd->ev);
+       thrd->free = true;
+-      spin_unlock_irqrestore(&pl330->lock, flags);
+ }
+ 
+ /* Initialize the structure for PL330 configuration, that can be used
+@@ -2120,20 +2112,20 @@ static int pl330_alloc_chan_resources(struct dma_chan *chan)
+       struct pl330_dmac *pl330 = pch->dmac;
+       unsigned long flags;
+ 
+-      spin_lock_irqsave(&pch->lock, flags);
++      spin_lock_irqsave(&pl330->lock, flags);
+ 
+       dma_cookie_init(chan);
+       pch->cyclic = false;
+ 
+       pch->thread = pl330_request_channel(pl330);
+       if (!pch->thread) {
+-              spin_unlock_irqrestore(&pch->lock, flags);
++              spin_unlock_irqrestore(&pl330->lock, flags);
+               return -ENOMEM;
+       }
+ 
+       tasklet_init(&pch->task, pl330_tasklet, (unsigned long) pch);
+ 
+-      spin_unlock_irqrestore(&pch->lock, flags);
++      spin_unlock_irqrestore(&pl330->lock, flags);
+ 
+       return 1;
+ }
+@@ -2236,12 +2228,13 @@ static int pl330_pause(struct dma_chan *chan)
+ static void pl330_free_chan_resources(struct dma_chan *chan)
+ {
+       struct dma_pl330_chan *pch = to_pchan(chan);
++      struct pl330_dmac *pl330 = pch->dmac;
+       unsigned long flags;
+ 
+       tasklet_kill(&pch->task);
+ 
+       pm_runtime_get_sync(pch->dmac->ddma.dev);
+-      spin_lock_irqsave(&pch->lock, flags);
++      spin_lock_irqsave(&pl330->lock, flags);
+ 
+       pl330_release_channel(pch->thread);
+       pch->thread = NULL;
+@@ -2249,7 +2242,7 @@ static void pl330_free_chan_resources(struct dma_chan *chan)
+       if (pch->cyclic)
+               list_splice_tail_init(&pch->work_list, &pch->dmac->desc_pool);
+ 
+-      spin_unlock_irqrestore(&pch->lock, flags);
++      spin_unlock_irqrestore(&pl330->lock, flags);
+       pm_runtime_mark_last_busy(pch->dmac->ddma.dev);
+       pm_runtime_put_autosuspend(pch->dmac->ddma.dev);
+ }
+diff --git a/drivers/dma/stm32-dma.c b/drivers/dma/stm32-dma.c
+index 307547f4848d..ae3f60be7759 100644
+--- a/drivers/dma/stm32-dma.c
++++ b/drivers/dma/stm32-dma.c
+@@ -884,7 +884,7 @@ static enum dma_status stm32_dma_tx_status(struct dma_chan *c,
+       struct virt_dma_desc *vdesc;
+       enum dma_status status;
+       unsigned long flags;
+-      u32 residue;
++      u32 residue = 0;
+ 
+       status = dma_cookie_status(c, cookie, state);
+       if ((status == DMA_COMPLETE) || (!state))
+@@ -892,16 +892,12 @@ static enum dma_status stm32_dma_tx_status(struct dma_chan *c,
+ 
+       spin_lock_irqsave(&chan->vchan.lock, flags);
+       vdesc = vchan_find_desc(&chan->vchan, cookie);
+-      if (cookie == chan->desc->vdesc.tx.cookie) {
++      if (chan->desc && cookie == chan->desc->vdesc.tx.cookie)
+               residue = stm32_dma_desc_residue(chan, chan->desc,
+                                                chan->next_sg);
+-      } else if (vdesc) {
++      else if (vdesc)
+               residue = stm32_dma_desc_residue(chan,
+                                                to_stm32_dma_desc(vdesc), 0);
+-      } else {
+-              residue = 0;
+-      }
+-
+       dma_set_residue(state, residue);
+ 
+       spin_unlock_irqrestore(&chan->vchan.lock, flags);
+@@ -976,21 +972,18 @@ static struct dma_chan *stm32_dma_of_xlate(struct of_phandle_args *dma_spec,
+       struct stm32_dma_chan *chan;
+       struct dma_chan *c;
+ 
+-      if (dma_spec->args_count < 3)
++      if (dma_spec->args_count < 4)
+               return NULL;
+ 
+       cfg.channel_id = dma_spec->args[0];
+       cfg.request_line = dma_spec->args[1];
+       cfg.stream_config = dma_spec->args[2];
+-      cfg.threshold = 0;
++      cfg.threshold = dma_spec->args[3];
+ 
+       if ((cfg.channel_id >= STM32_DMA_MAX_CHANNELS) || (cfg.request_line >=
+                               STM32_DMA_MAX_REQUEST_ID))
+               return NULL;
+ 
+-      if (dma_spec->args_count > 3)
+-              cfg.threshold = dma_spec->args[3];
+-
+       chan = &dmadev->chan[cfg.channel_id];
+ 
+       c = dma_get_slave_channel(&chan->vchan.chan);
+diff --git a/drivers/edac/sb_edac.c b/drivers/edac/sb_edac.c
+index 54775221a01f..3c47e6361d81 100644
+--- a/drivers/edac/sb_edac.c
++++ b/drivers/edac/sb_edac.c
+@@ -2510,6 +2510,7 @@ static int ibridge_mci_bind_devs(struct mem_ctl_info *mci,
+                       break;
+               case PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TA:
+                       pvt->pci_ta = pdev;
++                      break;
+               case PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_RAS:
+                       pvt->pci_ras = pdev;
+                       break;
+diff --git a/drivers/gpu/drm/amd/amdgpu/dce_virtual.c b/drivers/gpu/drm/amd/amdgpu/dce_virtual.c
+index c2bd9f045532..6d75fd0e3105 100644
+--- a/drivers/gpu/drm/amd/amdgpu/dce_virtual.c
++++ b/drivers/gpu/drm/amd/amdgpu/dce_virtual.c
+@@ -565,11 +565,8 @@ static const struct drm_encoder_helper_funcs dce_virtual_encoder_helper_funcs =
+ 
+ static void dce_virtual_encoder_destroy(struct drm_encoder *encoder)
+ {
+-      struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
+-
+-      kfree(amdgpu_encoder->enc_priv);
+       drm_encoder_cleanup(encoder);
+-      kfree(amdgpu_encoder);
++      kfree(encoder);
+ }
+ 
+ static const struct drm_encoder_funcs dce_virtual_encoder_funcs = {
+diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c b/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
+index 50f0cf2788b7..7522f796f19b 100644
+--- a/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
+@@ -182,7 +182,7 @@ static void vce_v3_0_set_vce_sw_clock_gating(struct amdgpu_device *adev,
+               WREG32(mmVCE_UENC_CLOCK_GATING_2, data);
+ 
+               data = RREG32(mmVCE_UENC_REG_CLOCK_GATING);
+-              data &= ~0xffc00000;
++              data &= ~0x3ff;
+               WREG32(mmVCE_UENC_REG_CLOCK_GATING, data);
+ 
+               data = RREG32(mmVCE_UENC_DMA_DCLK_CTRL);
+diff --git a/drivers/gpu/drm/exynos/exynos5433_drm_decon.c b/drivers/gpu/drm/exynos/exynos5433_drm_decon.c
+index 6ca1f3117fe8..6dd09c306bc1 100644
+--- a/drivers/gpu/drm/exynos/exynos5433_drm_decon.c
++++ b/drivers/gpu/drm/exynos/exynos5433_drm_decon.c
+@@ -46,7 +46,8 @@ enum decon_flag_bits {
+       BIT_CLKS_ENABLED,
+       BIT_IRQS_ENABLED,
+       BIT_WIN_UPDATED,
+-      BIT_SUSPENDED
++      BIT_SUSPENDED,
++      BIT_REQUEST_UPDATE
+ };
+ 
+ struct decon_context {
+@@ -315,6 +316,7 @@ static void decon_update_plane(struct exynos_drm_crtc *crtc,
+ 
+       /* window enable */
+       decon_set_bits(ctx, DECON_WINCONx(win), WINCONx_ENWIN_F, ~0);
++      set_bit(BIT_REQUEST_UPDATE, &ctx->flags);
+ }
+ 
+ static void decon_disable_plane(struct exynos_drm_crtc *crtc,
+@@ -327,6 +329,7 @@ static void decon_disable_plane(struct exynos_drm_crtc *crtc,
+               return;
+ 
+       decon_set_bits(ctx, DECON_WINCONx(win), WINCONx_ENWIN_F, 0);
++      set_bit(BIT_REQUEST_UPDATE, &ctx->flags);
+ }
+ 
+ static void decon_atomic_flush(struct exynos_drm_crtc *crtc)
+@@ -340,8 +343,8 @@ static void decon_atomic_flush(struct exynos_drm_crtc *crtc)
+       for (i = ctx->first_win; i < WINDOWS_NR; i++)
+               decon_shadow_protect_win(ctx, i, false);
+ 
+-      /* standalone update */
+-      decon_set_bits(ctx, DECON_UPDATE, STANDALONE_UPDATE_F, ~0);
++      if (test_and_clear_bit(BIT_REQUEST_UPDATE, &ctx->flags))
++              decon_set_bits(ctx, DECON_UPDATE, STANDALONE_UPDATE_F, ~0);
+ 
+       if (ctx->out_type & IFTYPE_I80)
+               set_bit(BIT_WIN_UPDATED, &ctx->flags);
+diff --git a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c
+index cc2fde2ae5ef..c9eef0f51d31 100644
+--- a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c
++++ b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c
+@@ -243,7 +243,6 @@ static int fsl_dcu_drm_pm_suspend(struct device *dev)
+               return PTR_ERR(fsl_dev->state);
+       }
+ 
+-      clk_disable_unprepare(fsl_dev->pix_clk);
+       clk_disable_unprepare(fsl_dev->clk);
+ 
+       return 0;
+@@ -266,6 +265,7 @@ static int fsl_dcu_drm_pm_resume(struct device *dev)
+       if (fsl_dev->tcon)
+               fsl_tcon_bypass_enable(fsl_dev->tcon);
+       fsl_dcu_drm_init_planes(fsl_dev->drm);
++      enable_irq(fsl_dev->irq);
+       drm_atomic_helper_resume(fsl_dev->drm, fsl_dev->state);
+ 
+       console_lock();
+@@ -273,7 +273,6 @@ static int fsl_dcu_drm_pm_resume(struct device *dev)
+       console_unlock();
+ 
+       drm_kms_helper_poll_enable(fsl_dev->drm);
+-      enable_irq(fsl_dev->irq);
+ 
+       return 0;
+ }
+diff --git a/drivers/i2c/busses/i2c-cadence.c b/drivers/i2c/busses/i2c-cadence.c
+index 686971263bef..45d6771fac8c 100644
+--- a/drivers/i2c/busses/i2c-cadence.c
++++ b/drivers/i2c/busses/i2c-cadence.c
+@@ -962,10 +962,6 @@ static int cdns_i2c_probe(struct platform_device *pdev)
+               goto err_clk_dis;
+       }
+ 
+-      ret = i2c_add_adapter(&id->adap);
+-      if (ret < 0)
+-              goto err_clk_dis;
+-
+       /*
+        * Cadence I2C controller has a bug wherein it generates
+        * invalid read transaction after HW timeout in master receiver mode.
+@@ -975,6 +971,10 @@ static int cdns_i2c_probe(struct platform_device *pdev)
+        */
+       cdns_i2c_writereg(CDNS_I2C_TIMEOUT_MAX, CDNS_I2C_TIME_OUT_OFFSET);
+ 
++      ret = i2c_add_adapter(&id->adap);
++      if (ret < 0)
++              goto err_clk_dis;
++
+       dev_info(&pdev->dev, "%u kHz mmio %08lx irq %d\n",
+                id->i2c_clk / 1000, (unsigned long)r_mem->start, id->irq);
+ 
+diff --git a/drivers/iio/adc/ti-ads1015.c b/drivers/iio/adc/ti-ads1015.c
+index 472641fc890c..af05e20c986b 100644
+--- a/drivers/iio/adc/ti-ads1015.c
++++ b/drivers/iio/adc/ti-ads1015.c
+@@ -269,6 +269,7 @@ int ads1015_get_adc_result(struct ads1015_data *data, int chan, int *val)
+ 
+               conv_time = DIV_ROUND_UP(USEC_PER_SEC, data->data_rate[dr_old]);
+               conv_time += DIV_ROUND_UP(USEC_PER_SEC, data->data_rate[dr]);
++              conv_time += conv_time / 10; /* 10% internal clock inaccuracy */
+               usleep_range(conv_time, conv_time + 1);
+               data->conv_invalid = false;
+       }
+diff --git a/drivers/infiniband/hw/qedr/qedr_cm.c b/drivers/infiniband/hw/qedr/qedr_cm.c
+index 63890ebb72bd..eccf7039aaca 100644
+--- a/drivers/infiniband/hw/qedr/qedr_cm.c
++++ b/drivers/infiniband/hw/qedr/qedr_cm.c
+@@ -404,9 +404,9 @@ static inline int qedr_gsi_build_packet(struct qedr_dev *dev,
+       }
+ 
+       if (ether_addr_equal(udh.eth.smac_h, udh.eth.dmac_h))
+-              packet->tx_dest = QED_ROCE_LL2_TX_DEST_NW;
+-      else
+               packet->tx_dest = QED_ROCE_LL2_TX_DEST_LB;
++      else
++              packet->tx_dest = QED_ROCE_LL2_TX_DEST_NW;
+ 
+       packet->roce_mode = roce_mode;
+       memcpy(packet->header.vaddr, ud_header_buffer, header_size);
+diff --git a/drivers/infiniband/hw/qedr/verbs.c b/drivers/infiniband/hw/qedr/verbs.c
+index 4ba019e3dc56..35d5b89decb4 100644
+--- a/drivers/infiniband/hw/qedr/verbs.c
++++ b/drivers/infiniband/hw/qedr/verbs.c
+@@ -1653,7 +1653,7 @@ static int qedr_update_qp_state(struct qedr_dev *dev,
+       int status = 0;
+ 
+       if (new_state == qp->state)
+-              return 1;
++              return 0;
+ 
+       switch (qp->state) {
+       case QED_ROCE_QP_STATE_RESET:
+diff --git a/drivers/md/bcache/request.c b/drivers/md/bcache/request.c
+index e0f1c6d534fe..ab8a1b36af21 100644
+--- a/drivers/md/bcache/request.c
++++ b/drivers/md/bcache/request.c
+@@ -703,7 +703,14 @@ static void cached_dev_read_error(struct closure *cl)
+       struct search *s = container_of(cl, struct search, cl);
+       struct bio *bio = &s->bio.bio;
+ 
+-      if (s->recoverable) {
++      /*
++       * If the read request hit dirty data (s->read_dirty_data is true),
++       * then recovering a failed read request from the cached device may
++       * return stale data. So read failure recovery is only permitted
++       * when the read request hit clean data in the cache device, or
++       * when a cache read race happened.
++       */
++      if (s->recoverable && !s->read_dirty_data) {
+               /* Retry from the backing device: */
+               trace_bcache_read_retry(s->orig_bio);
+ 
+diff --git a/drivers/mmc/host/sdhci-msm.c b/drivers/mmc/host/sdhci-msm.c
+index 90ed2e12d345..80c89a31d790 100644
+--- a/drivers/mmc/host/sdhci-msm.c
++++ b/drivers/mmc/host/sdhci-msm.c
+@@ -642,6 +642,21 @@ static int sdhci_msm_probe(struct platform_device *pdev)
+                              CORE_VENDOR_SPEC_CAPABILITIES0);
+       }
+ 
++      /*
++       * Power on reset state may trigger power irq if previous status of
++       * PWRCTL was either BUS_ON or IO_HIGH_V. So before enabling pwr irq
++       * interrupt in GIC, any pending power irq interrupt should be
++       * acknowledged. Otherwise power irq interrupt handler would be
++       * fired prematurely.
++       */
++      sdhci_msm_voltage_switch(host);
++
++      /*
++       * Ensure that the above writes are propagated before interrupt enablement
++       * in GIC.
++       */
++      mb();
++
+       /* Setup IRQ for handling power/voltage tasks with PMIC */
+       msm_host->pwr_irq = platform_get_irq_byname(pdev, "pwr_irq");
+       if (msm_host->pwr_irq < 0) {
+@@ -651,6 +666,9 @@ static int sdhci_msm_probe(struct platform_device *pdev)
+               goto clk_disable;
+       }
+ 
++      /* Enable pwr irq interrupts */
++      writel_relaxed(INT_MASK, msm_host->core_mem + CORE_PWRCTL_MASK);
++
+       ret = devm_request_threaded_irq(&pdev->dev, msm_host->pwr_irq, NULL,
+                                       sdhci_msm_pwr_irq, IRQF_ONESHOT,
+                                       dev_name(&pdev->dev), host);
+diff --git a/drivers/net/appletalk/ipddp.c b/drivers/net/appletalk/ipddp.c
+index e90c6a7333d7..2e4649655181 100644
+--- a/drivers/net/appletalk/ipddp.c
++++ b/drivers/net/appletalk/ipddp.c
+@@ -191,7 +191,7 @@ static netdev_tx_t ipddp_xmit(struct sk_buff *skb, struct net_device *dev)
+  */
+ static int ipddp_create(struct ipddp_route *new_rt)
+ {
+-        struct ipddp_route *rt = kmalloc(sizeof(*rt), GFP_KERNEL);
++        struct ipddp_route *rt = kzalloc(sizeof(*rt), GFP_KERNEL);
+ 
+         if (rt == NULL)
+                 return -ENOMEM;
+diff --git a/drivers/net/ethernet/broadcom/bcmsysport.c b/drivers/net/ethernet/broadcom/bcmsysport.c
+index be7ec5a76a54..744ed6ddaf37 100644
+--- a/drivers/net/ethernet/broadcom/bcmsysport.c
++++ b/drivers/net/ethernet/broadcom/bcmsysport.c
+@@ -1023,15 +1023,6 @@ static netdev_tx_t bcm_sysport_xmit(struct sk_buff *skb,
+               goto out;
+       }
+ 
+-      /* Insert TSB and checksum infos */
+-      if (priv->tsb_en) {
+-              skb = bcm_sysport_insert_tsb(skb, dev);
+-              if (!skb) {
+-                      ret = NETDEV_TX_OK;
+-                      goto out;
+-              }
+-      }
+-
+       /* The Ethernet switch we are interfaced with needs packets to be at
+        * least 64 bytes (including FCS) otherwise they will be discarded when
+        * they enter the switch port logic. When Broadcom tags are enabled, we
+@@ -1039,13 +1030,21 @@ static netdev_tx_t bcm_sysport_xmit(struct sk_buff *skb,
+        * (including FCS and tag) because the length verification is done after
+        * the Broadcom tag is stripped off the ingress packet.
+        */
+-      if (skb_padto(skb, ETH_ZLEN + ENET_BRCM_TAG_LEN)) {
++      if (skb_put_padto(skb, ETH_ZLEN + ENET_BRCM_TAG_LEN)) {
+               ret = NETDEV_TX_OK;
+               goto out;
+       }
+ 
+-      skb_len = skb->len < ETH_ZLEN + ENET_BRCM_TAG_LEN ?
+-                      ETH_ZLEN + ENET_BRCM_TAG_LEN : skb->len;
++      /* Insert TSB and checksum infos */
++      if (priv->tsb_en) {
++              skb = bcm_sysport_insert_tsb(skb, dev);
++              if (!skb) {
++                      ret = NETDEV_TX_OK;
++                      goto out;
++              }
++      }
++
++      skb_len = skb->len;
+ 
+       mapping = dma_map_single(kdev, skb->data, skb_len, DMA_TO_DEVICE);
+       if (dma_mapping_error(kdev, mapping)) {
+diff --git a/drivers/net/ethernet/cavium/thunder/thunder_xcv.c b/drivers/net/ethernet/cavium/thunder/thunder_xcv.c
+index 67befedef709..578c7f8f11bf 100644
+--- a/drivers/net/ethernet/cavium/thunder/thunder_xcv.c
++++ b/drivers/net/ethernet/cavium/thunder/thunder_xcv.c
+@@ -116,8 +116,7 @@ void xcv_setup_link(bool link_up, int link_speed)
+       int speed = 2;
+ 
+       if (!xcv) {
+-              dev_err(&xcv->pdev->dev,
+-                      "XCV init not done, probe may have failed\n");
++              pr_err("XCV init not done, probe may have failed\n");
+               return;
+       }
+ 
+diff --git a/drivers/net/ethernet/chelsio/libcxgb/libcxgb_cm.c b/drivers/net/ethernet/chelsio/libcxgb/libcxgb_cm.c
+index 0f0de5b63622..d04a6c163445 100644
+--- a/drivers/net/ethernet/chelsio/libcxgb/libcxgb_cm.c
++++ b/drivers/net/ethernet/chelsio/libcxgb/libcxgb_cm.c
+@@ -133,17 +133,15 @@ cxgb_find_route6(struct cxgb4_lld_info *lldi,
+               if (ipv6_addr_type(&fl6.daddr) & IPV6_ADDR_LINKLOCAL)
+                       fl6.flowi6_oif = sin6_scope_id;
+               dst = ip6_route_output(&init_net, NULL, &fl6);
+-              if (!dst)
+-                      goto out;
+-              if (!cxgb_our_interface(lldi, get_real_dev,
+-                                      ip6_dst_idev(dst)->dev) &&
+-                  !(ip6_dst_idev(dst)->dev->flags & IFF_LOOPBACK)) {
++              if (dst->error ||
++                  (!cxgb_our_interface(lldi, get_real_dev,
++                                       ip6_dst_idev(dst)->dev) &&
++                   !(ip6_dst_idev(dst)->dev->flags & IFF_LOOPBACK))) {
+                       dst_release(dst);
+-                      dst = NULL;
++                      return NULL;
+               }
+       }
+ 
+-out:
+       return dst;
+ }
+ EXPORT_SYMBOL(cxgb_find_route6);
+diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
+index 5626908f3f7a..1644896568c4 100644
+--- a/drivers/net/ethernet/emulex/benet/be_main.c
++++ b/drivers/net/ethernet/emulex/benet/be_main.c
+@@ -275,8 +275,7 @@ static int be_dev_mac_add(struct be_adapter *adapter, u8 *mac)
+ 
+       /* Check if mac has already been added as part of uc-list */
+       for (i = 0; i < adapter->uc_macs; i++) {
+-              if (ether_addr_equal((u8 *)&adapter->uc_list[i * ETH_ALEN],
+-                                   mac)) {
++              if (ether_addr_equal(adapter->uc_list[i].mac, mac)) {
+                       /* mac already added, skip addition */
+                       adapter->pmac_id[0] = adapter->pmac_id[i + 1];
+                       return 0;
+@@ -363,8 +362,10 @@ static int be_mac_addr_set(struct net_device *netdev, void *p)
+               status = -EPERM;
+               goto err;
+       }
+-done:
++
++      /* Remember currently programmed MAC */
+       ether_addr_copy(adapter->dev_mac, addr->sa_data);
++done:
+       ether_addr_copy(netdev->dev_addr, addr->sa_data);
+       dev_info(dev, "MAC address changed to %pM\n", addr->sa_data);
+       return 0;
+@@ -1679,14 +1680,12 @@ static void be_clear_mc_list(struct be_adapter *adapter)
+ 
+ static int be_uc_mac_add(struct be_adapter *adapter, int uc_idx)
+ {
+-      if (ether_addr_equal((u8 *)&adapter->uc_list[uc_idx * ETH_ALEN],
+-                           adapter->dev_mac)) {
++      if (ether_addr_equal(adapter->uc_list[uc_idx].mac, adapter->dev_mac)) {
+               adapter->pmac_id[uc_idx + 1] = adapter->pmac_id[0];
+               return 0;
+       }
+ 
+-      return be_cmd_pmac_add(adapter,
+-                             (u8 *)&adapter->uc_list[uc_idx * ETH_ALEN],
++      return be_cmd_pmac_add(adapter, adapter->uc_list[uc_idx].mac,
+                              adapter->if_handle,
+                              &adapter->pmac_id[uc_idx + 1], 0);
+ }
+@@ -1722,9 +1721,8 @@ static void be_set_uc_list(struct be_adapter *adapter)
+       }
+ 
+       if (adapter->update_uc_list) {
+-              i = 1; /* First slot is claimed by the Primary MAC */
+-
+               /* cache the uc-list in adapter array */
++              i = 0;
+               netdev_for_each_uc_addr(ha, netdev) {
+                       ether_addr_copy(adapter->uc_list[i].mac, ha->addr);
+                       i++;
+@@ -3639,8 +3637,10 @@ static void be_disable_if_filters(struct be_adapter *adapter)
+ {
+       /* Don't delete MAC on BE3 VFs without FILTMGMT privilege  */
+       if (!BEx_chip(adapter) || !be_virtfn(adapter) ||
+-          check_privilege(adapter, BE_PRIV_FILTMGMT))
++          check_privilege(adapter, BE_PRIV_FILTMGMT)) {
+               be_dev_mac_del(adapter, adapter->pmac_id[0]);
++              eth_zero_addr(adapter->dev_mac);
++      }
+ 
+       be_clear_uc_list(adapter);
+       be_clear_mc_list(adapter);
+@@ -3794,12 +3794,27 @@ static int be_enable_if_filters(struct be_adapter *adapter)
+       if (status)
+               return status;
+ 
+-      /* Don't add MAC on BE3 VFs without FILTMGMT privilege */
+-      if (!BEx_chip(adapter) || !be_virtfn(adapter) ||
+-          check_privilege(adapter, BE_PRIV_FILTMGMT)) {
++      /* Normally this condition is true, as ->dev_mac is zeroed.
++       * But on BE3 VFs the initial MAC is pre-programmed by the PF and
++       * a subsequent be_dev_mac_add() can fail (after a fresh boot)
++       */
++      if (!ether_addr_equal(adapter->dev_mac, adapter->netdev->dev_addr)) {
++              int old_pmac_id = -1;
++
++              /* Remember old programmed MAC if any - can happen on BE3 VF */
++              if (!is_zero_ether_addr(adapter->dev_mac))
++                      old_pmac_id = adapter->pmac_id[0];
++
+               status = be_dev_mac_add(adapter, adapter->netdev->dev_addr);
+               if (status)
+                       return status;
++
++              /* Delete the old programmed MAC as we successfully programmed
++               * a new MAC
++               */
++              if (old_pmac_id >= 0 && old_pmac_id != adapter->pmac_id[0])
++                      be_dev_mac_del(adapter, old_pmac_id);
++
+               ether_addr_copy(adapter->dev_mac, adapter->netdev->dev_addr);
+       }
+ 
+@@ -4573,6 +4588,10 @@ static int be_mac_setup(struct be_adapter *adapter)
+ 
+               memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
+               memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
++
++              /* Initial MAC for BE3 VFs is already programmed by PF */
++              if (BEx_chip(adapter) && be_virtfn(adapter))
++                      memcpy(adapter->dev_mac, mac, ETH_ALEN);
+       }
+ 
+       return 0;
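The be_enable_if_filters() hunk above orders the operations as add-new-then-delete-old so the device always keeps one valid unicast filter programmed. A hedged sketch of that sequencing (all structs and helpers below are hypothetical stand-ins, not the benet driver API):

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

struct ctx {
	unsigned char dev_mac[6];  /* MAC currently programmed, zero if none */
	int pmac_id;               /* filter slot of the programmed MAC */
};

static bool is_zero_mac(const unsigned char *m)
{
	static const unsigned char z[6];
	return memcmp(m, z, 6) == 0;
}

static int hw_add_mac(struct ctx *c, const unsigned char *mac)
{
	static int next_slot = 1;
	c->pmac_id = next_slot++;          /* pretend the device gave a slot */
	printf("add %02x.. -> slot %d\n", mac[0], c->pmac_id);
	return 0;
}

static void hw_del_mac(struct ctx *c, int slot)
{
	(void)c;
	printf("del slot %d\n", slot);
}

static int replace_mac(struct ctx *c, const unsigned char *new_mac)
{
	int old_id = -1;

	if (!is_zero_mac(c->dev_mac))      /* remember old slot, if any */
		old_id = c->pmac_id;

	if (hw_add_mac(c, new_mac))        /* program the new MAC first */
		return -1;

	if (old_id >= 0 && old_id != c->pmac_id)
		hw_del_mac(c, old_id);     /* only then retire the old slot */

	memcpy(c->dev_mac, new_mac, 6);    /* remember what is programmed */
	return 0;
}

int main(void)
{
	struct ctx c = { {0}, -1 };
	unsigned char a[6] = {0x02, 0, 0, 0, 0, 1};
	unsigned char b[6] = {0x02, 0, 0, 0, 0, 2};

	replace_mac(&c, a);                /* first add: nothing to delete */
	replace_mac(&c, b);                /* replace: add b, delete a's slot */
	return 0;
}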
+diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
+index 12aef1b15356..849b8712ec81 100644
+--- a/drivers/net/ethernet/freescale/fec_main.c
++++ b/drivers/net/ethernet/freescale/fec_main.c
+@@ -2923,6 +2923,7 @@ static void set_multicast_list(struct net_device *ndev)
+       struct netdev_hw_addr *ha;
+       unsigned int i, bit, data, crc, tmp;
+       unsigned char hash;
++      unsigned int hash_high = 0, hash_low = 0;
+ 
+       if (ndev->flags & IFF_PROMISC) {
+               tmp = readl(fep->hwp + FEC_R_CNTRL);
+@@ -2945,11 +2946,7 @@ static void set_multicast_list(struct net_device *ndev)
+               return;
+       }
+ 
+-      /* Clear filter and add the addresses in hash register
+-       */
+-      writel(0, fep->hwp + FEC_GRP_HASH_TABLE_HIGH);
+-      writel(0, fep->hwp + FEC_GRP_HASH_TABLE_LOW);
+-
++      /* Add the addresses in hash register */
+       netdev_for_each_mc_addr(ha, ndev) {
+               /* calculate crc32 value of mac address */
+               crc = 0xffffffff;
+@@ -2967,16 +2964,14 @@ static void set_multicast_list(struct net_device *ndev)
+                */
+               hash = (crc >> (32 - FEC_HASH_BITS)) & 0x3f;
+ 
+-              if (hash > 31) {
+-                      tmp = readl(fep->hwp + FEC_GRP_HASH_TABLE_HIGH);
+-                      tmp |= 1 << (hash - 32);
+-                      writel(tmp, fep->hwp + FEC_GRP_HASH_TABLE_HIGH);
+-              } else {
+-                      tmp = readl(fep->hwp + FEC_GRP_HASH_TABLE_LOW);
+-                      tmp |= 1 << hash;
+-                      writel(tmp, fep->hwp + FEC_GRP_HASH_TABLE_LOW);
+-              }
++              if (hash > 31)
++                      hash_high |= 1 << (hash - 32);
++              else
++                      hash_low |= 1 << hash;
+       }
++
++      writel(hash_high, fep->hwp + FEC_GRP_HASH_TABLE_HIGH);
++      writel(hash_low, fep->hwp + FEC_GRP_HASH_TABLE_LOW);
+ }
+ 
+ /* Set a MAC change in hardware. */
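The rework above computes every hash bit into two local words and then writes FEC_GRP_HASH_TABLE_HIGH/LOW once each, instead of clearing the registers and read-modify-writing them per address. A self-contained sketch of the same bucketing (the inline CRC matches the reflected CRC-32 the driver computes, poly 0xedb88320, init 0xffffffff; the printf stands in for the register writes):

#include <stdint.h>
#include <stdio.h>

#define FEC_HASH_BITS 6   /* 64 hash buckets, as in this kernel */

static uint32_t crc32_le(const uint8_t *p, int len)
{
	uint32_t crc = 0xffffffff;
	int i, bit;

	for (i = 0; i < len; i++) {
		uint8_t data = p[i];
		for (bit = 0; bit < 8; bit++, data >>= 1)
			crc = (crc >> 1) ^ (((crc ^ data) & 1) ? 0xedb88320 : 0);
	}
	return crc;
}

int main(void)
{
	const uint8_t macs[][6] = {
		{ 0x01, 0x00, 0x5e, 0x00, 0x00, 0x01 },  /* 224.0.0.1 */
		{ 0x01, 0x00, 0x5e, 0x00, 0x00, 0xfb },  /* mDNS */
	};
	uint32_t hash_high = 0, hash_low = 0;
	unsigned i;

	for (i = 0; i < sizeof(macs) / sizeof(macs[0]); i++) {
		uint32_t crc = crc32_le(macs[i], 6);
		unsigned hash = (crc >> (32 - FEC_HASH_BITS)) & 0x3f;

		if (hash > 31)
			hash_high |= 1u << (hash - 32);
		else
			hash_low |= 1u << hash;
	}
	/* one "register write" per word, instead of one RMW per address */
	printf("GRP_HASH_HIGH=%08x GRP_HASH_LOW=%08x\n", hash_high, hash_low);
	return 0;
}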
+diff --git a/drivers/net/ethernet/mellanox/mlx4/en_clock.c b/drivers/net/ethernet/mellanox/mlx4/en_clock.c
+index d4d97ca12e83..f9897d17f01d 100644
+--- a/drivers/net/ethernet/mellanox/mlx4/en_clock.c
++++ b/drivers/net/ethernet/mellanox/mlx4/en_clock.c
+@@ -251,13 +251,9 @@ static u32 freq_to_shift(u16 freq)
+ {
+       u32 freq_khz = freq * 1000;
+       u64 max_val_cycles = freq_khz * 1000 * MLX4_EN_WRAP_AROUND_SEC;
+-      u64 tmp_rounded =
+-              roundup_pow_of_two(max_val_cycles) > max_val_cycles ?
+-              roundup_pow_of_two(max_val_cycles) - 1 : UINT_MAX;
+-      u64 max_val_cycles_rounded = is_power_of_2(max_val_cycles + 1) ?
+-              max_val_cycles : tmp_rounded;
++      u64 max_val_cycles_rounded = 1ULL << fls64(max_val_cycles - 1);
+       /* calculate max possible multiplier in order to fit in 64bit */
+-      u64 max_mul = div_u64(0xffffffffffffffffULL, max_val_cycles_rounded);
++      u64 max_mul = div64_u64(ULLONG_MAX, max_val_cycles_rounded);
+ 
+       /* This comes from the reverse of clocksource_khz2mult */
+       return ilog2(div_u64(max_mul * freq_khz, 1000000));
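The simplification above leans on the identity that 1ULL << fls64(x - 1) rounds x up to the nearest power of two while leaving exact powers of two unchanged, which is what the old two-step ternary computed. A small sketch of that identity (fls64() emulated here with a compiler builtin; valid for x >= 1):

#include <assert.h>
#include <stdint.h>

static int fls64(uint64_t x)
{
	return x ? 64 - __builtin_clzll(x) : 0;
}

static uint64_t roundup_pow2(uint64_t x)
{
	return 1ULL << fls64(x - 1);
}

int main(void)
{
	assert(roundup_pow2(1) == 1);
	assert(roundup_pow2(4096) == 4096);           /* already a power of two */
	assert(roundup_pow2(4097) == 8192);           /* rounded up */
	/* e.g. a 156 MHz clock over a 10 s wrap window */
	assert(roundup_pow2(156000000ULL * 10) == (1ULL << 31));
	return 0;
}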
+diff --git a/drivers/net/ethernet/renesas/ravb_main.c b/drivers/net/ethernet/renesas/ravb_main.c
+index 11623aad0e8e..10d3a9f6349e 100644
+--- a/drivers/net/ethernet/renesas/ravb_main.c
++++ b/drivers/net/ethernet/renesas/ravb_main.c
+@@ -941,14 +941,10 @@ static int ravb_poll(struct napi_struct *napi, int budget)
+       /* Receive error message handling */
+       priv->rx_over_errors =  priv->stats[RAVB_BE].rx_over_errors;
+       priv->rx_over_errors += priv->stats[RAVB_NC].rx_over_errors;
+-      if (priv->rx_over_errors != ndev->stats.rx_over_errors) {
++      if (priv->rx_over_errors != ndev->stats.rx_over_errors)
+               ndev->stats.rx_over_errors = priv->rx_over_errors;
+-              netif_err(priv, rx_err, ndev, "Receive Descriptor Empty\n");
+-      }
+-      if (priv->rx_fifo_errors != ndev->stats.rx_fifo_errors) {
++      if (priv->rx_fifo_errors != ndev->stats.rx_fifo_errors)
+               ndev->stats.rx_fifo_errors = priv->rx_fifo_errors;
+-              netif_err(priv, rx_err, ndev, "Receive FIFO Overflow\n");
+-      }
+ out:
+       return budget - quota;
+ }
+diff --git a/drivers/net/gtp.c b/drivers/net/gtp.c
+index cebde074d196..cb206e5526c4 100644
+--- a/drivers/net/gtp.c
++++ b/drivers/net/gtp.c
+@@ -69,7 +69,6 @@ struct gtp_dev {
+       struct socket           *sock0;
+       struct socket           *sock1u;
+ 
+-      struct net              *net;
+       struct net_device       *dev;
+ 
+       unsigned int            hash_size;
+@@ -316,7 +315,7 @@ static int gtp_encap_recv(struct sock *sk, struct sk_buff *skb)
+ 
+       netdev_dbg(gtp->dev, "encap_recv sk=%p\n", sk);
+ 
+-      xnet = !net_eq(gtp->net, dev_net(gtp->dev));
++      xnet = !net_eq(sock_net(sk), dev_net(gtp->dev));
+ 
+       switch (udp_sk(sk)->encap_type) {
+       case UDP_ENCAP_GTP0:
+@@ -612,7 +611,7 @@ static netdev_tx_t gtp_dev_xmit(struct sk_buff *skb, struct net_device *dev)
+                                   pktinfo.fl4.saddr, pktinfo.fl4.daddr,
+                                   pktinfo.iph->tos,
+                                   ip4_dst_hoplimit(&pktinfo.rt->dst),
+-                                  htons(IP_DF),
++                                  0,
+                                   pktinfo.gtph_port, pktinfo.gtph_port,
+                                   true, false);
+               break;
+@@ -658,7 +657,7 @@ static void gtp_link_setup(struct net_device *dev)
+ static int gtp_hashtable_new(struct gtp_dev *gtp, int hsize);
+ static void gtp_hashtable_free(struct gtp_dev *gtp);
+ static int gtp_encap_enable(struct net_device *dev, struct gtp_dev *gtp,
+-                          int fd_gtp0, int fd_gtp1, struct net *src_net);
++                          int fd_gtp0, int fd_gtp1);
+ 
+ static int gtp_newlink(struct net *src_net, struct net_device *dev,
+                       struct nlattr *tb[], struct nlattr *data[])
+@@ -675,7 +674,7 @@ static int gtp_newlink(struct net *src_net, struct net_device *dev,
+       fd0 = nla_get_u32(data[IFLA_GTP_FD0]);
+       fd1 = nla_get_u32(data[IFLA_GTP_FD1]);
+ 
+-      err = gtp_encap_enable(dev, gtp, fd0, fd1, src_net);
++      err = gtp_encap_enable(dev, gtp, fd0, fd1);
+       if (err < 0)
+               goto out_err;
+ 
+@@ -821,7 +820,7 @@ static void gtp_hashtable_free(struct gtp_dev *gtp)
+ }
+ 
+ static int gtp_encap_enable(struct net_device *dev, struct gtp_dev *gtp,
+-                          int fd_gtp0, int fd_gtp1, struct net *src_net)
++                          int fd_gtp0, int fd_gtp1)
+ {
+       struct udp_tunnel_sock_cfg tuncfg = {NULL};
+       struct socket *sock0, *sock1u;
+@@ -858,7 +857,6 @@ static int gtp_encap_enable(struct net_device *dev, struct gtp_dev *gtp,
+ 
+       gtp->sock0 = sock0;
+       gtp->sock1u = sock1u;
+-      gtp->net = src_net;
+ 
+       tuncfg.sk_user_data = gtp;
+       tuncfg.encap_rcv = gtp_encap_recv;
+diff --git a/drivers/net/phy/micrel.c b/drivers/net/phy/micrel.c
+index 222918828655..fbf5945ce00d 100644
+--- a/drivers/net/phy/micrel.c
++++ b/drivers/net/phy/micrel.c
+@@ -1020,7 +1020,7 @@ static struct phy_driver ksphy_driver[] = {
+       .phy_id         = PHY_ID_KSZ8795,
+       .phy_id_mask    = MICREL_PHY_ID_MASK,
+       .name           = "Micrel KSZ8795",
+-      .features       = (SUPPORTED_Pause | SUPPORTED_Asym_Pause),
++      .features       = PHY_BASIC_FEATURES,
+       .flags          = PHY_HAS_MAGICANEG | PHY_HAS_INTERRUPT,
+       .config_init    = kszphy_config_init,
+       .config_aneg    = ksz8873mll_config_aneg,
+diff --git a/drivers/net/xen-netback/common.h b/drivers/net/xen-netback/common.h
+index cb7365bdf6e0..5b1d2e8402d9 100644
+--- a/drivers/net/xen-netback/common.h
++++ b/drivers/net/xen-netback/common.h
+@@ -113,10 +113,10 @@ struct xenvif_stats {
+        * A subset of struct net_device_stats that contains only the
+        * fields that are updated in netback.c for each queue.
+        */
+-      unsigned int rx_bytes;
+-      unsigned int rx_packets;
+-      unsigned int tx_bytes;
+-      unsigned int tx_packets;
++      u64 rx_bytes;
++      u64 rx_packets;
++      u64 tx_bytes;
++      u64 tx_packets;
+ 
+       /* Additional stats used by xenvif */
+       unsigned long rx_gso_checksum_fixup;
+diff --git a/drivers/net/xen-netback/interface.c b/drivers/net/xen-netback/interface.c
+index 5bfaf5578810..618013e7f87b 100644
+--- a/drivers/net/xen-netback/interface.c
++++ b/drivers/net/xen-netback/interface.c
+@@ -225,10 +225,10 @@ static struct net_device_stats *xenvif_get_stats(struct net_device *dev)
+ {
+       struct xenvif *vif = netdev_priv(dev);
+       struct xenvif_queue *queue = NULL;
+-      unsigned long rx_bytes = 0;
+-      unsigned long rx_packets = 0;
+-      unsigned long tx_bytes = 0;
+-      unsigned long tx_packets = 0;
++      u64 rx_bytes = 0;
++      u64 rx_packets = 0;
++      u64 tx_bytes = 0;
++      u64 tx_packets = 0;
+       unsigned int index;
+ 
+       spin_lock(&vif->lock);
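The widening above matters because per-queue byte counters wrap quickly at 32 bits: an unsigned int rolls over after 4 GiB, which a 10 Gbit/s link can push in under four seconds. A quick userspace demonstration of the wrap:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t bytes32 = 0;
	uint64_t bytes64 = 0;
	uint64_t i;

	/* account five million 1500-byte frames (~7.5 GB of traffic) */
	for (i = 0; i < 5000000; i++) {
		bytes32 += 1500;
		bytes64 += 1500;
	}
	printf("u32 counter: %u (wrapped)\n", (unsigned)bytes32);
	printf("u64 counter: %llu\n", (unsigned long long)bytes64);
	return 0;
}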
+diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
+index cd442e46afb4..8d498a997e25 100644
+--- a/drivers/net/xen-netfront.c
++++ b/drivers/net/xen-netfront.c
+@@ -1854,27 +1854,19 @@ static int talk_to_netback(struct xenbus_device *dev,
+               xennet_destroy_queues(info);
+ 
+       err = xennet_create_queues(info, &num_queues);
+-      if (err < 0)
+-              goto destroy_ring;
++      if (err < 0) {
++              xenbus_dev_fatal(dev, err, "creating queues");
++              kfree(info->queues);
++              info->queues = NULL;
++              goto out;
++      }
+ 
+       /* Create shared ring, alloc event channel -- for each queue */
+       for (i = 0; i < num_queues; ++i) {
+               queue = &info->queues[i];
+               err = setup_netfront(dev, queue, feature_split_evtchn);
+-              if (err) {
+-                      /* setup_netfront() will tidy up the current
+-                       * queue on error, but we need to clean up
+-                       * those already allocated.
+-                       */
+-                      if (i > 0) {
+-                              rtnl_lock();
+-                              netif_set_real_num_tx_queues(info->netdev, i);
+-                              rtnl_unlock();
+-                              goto destroy_ring;
+-                      } else {
+-                              goto out;
+-                      }
+-              }
++              if (err)
++                      goto destroy_ring;
+       }
+ 
+ again:
+@@ -1964,9 +1956,9 @@ static int talk_to_netback(struct xenbus_device *dev,
+       xenbus_transaction_end(xbt, 1);
+  destroy_ring:
+       xennet_disconnect_backend(info);
+-      kfree(info->queues);
+-      info->queues = NULL;
++      xennet_destroy_queues(info);
+  out:
++      device_unregister(&dev->dev);
+       return err;
+ }
+ 
+diff --git a/drivers/nvme/target/core.c b/drivers/nvme/target/core.c
+index 55ce769cecee..fbd6d487103f 100644
+--- a/drivers/nvme/target/core.c
++++ b/drivers/nvme/target/core.c
+@@ -816,6 +816,9 @@ static void nvmet_ctrl_free(struct kref *ref)
+       list_del(&ctrl->subsys_entry);
+       mutex_unlock(&subsys->lock);
+ 
++      flush_work(&ctrl->async_event_work);
++      cancel_work_sync(&ctrl->fatal_err_work);
++
+       ida_simple_remove(&subsys->cntlid_ida, ctrl->cntlid);
+       nvmet_subsys_put(subsys);
+ 
+diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c
+index 91f5f55a8a9b..59059ffbb98c 100644
+--- a/drivers/scsi/qla2xxx/qla_target.c
++++ b/drivers/scsi/qla2xxx/qla_target.c
+@@ -668,11 +668,9 @@ static int qlt_reset(struct scsi_qla_host *vha, void *iocb, int mcmd)
+ {
+       struct qla_hw_data *ha = vha->hw;
+       struct qla_tgt_sess *sess = NULL;
+-      uint32_t unpacked_lun, lun = 0;
+       uint16_t loop_id;
+       int res = 0;
+       struct imm_ntfy_from_isp *n = (struct imm_ntfy_from_isp *)iocb;
+-      struct atio_from_isp *a = (struct atio_from_isp *)iocb;
+       unsigned long flags;
+ 
+       loop_id = le16_to_cpu(n->u.isp24.nport_handle);
+@@ -725,11 +723,7 @@ static int qlt_reset(struct scsi_qla_host *vha, void *iocb, int mcmd)
+           "loop_id %d)\n", vha->host_no, sess, sess->port_name,
+           mcmd, loop_id);
+ 
+-      lun = a->u.isp24.fcp_cmnd.lun;
+-      unpacked_lun = scsilun_to_int((struct scsi_lun *)&lun);
+-
+-      return qlt_issue_task_mgmt(sess, unpacked_lun, mcmd,
+-          iocb, QLA24XX_MGMT_SEND_NACK);
++      return qlt_issue_task_mgmt(sess, 0, mcmd, iocb, QLA24XX_MGMT_SEND_NACK);
+ }
+ 
+ /* ha->tgt.sess_lock supposed to be held on entry */
+diff --git a/drivers/spi/spi-axi-spi-engine.c b/drivers/spi/spi-axi-spi-engine.c
+index c1eafbd7610a..da51fed143cd 100644
+--- a/drivers/spi/spi-axi-spi-engine.c
++++ b/drivers/spi/spi-axi-spi-engine.c
+@@ -553,7 +553,7 @@ static int spi_engine_probe(struct platform_device *pdev)
+ 
+ static int spi_engine_remove(struct platform_device *pdev)
+ {
+-      struct spi_master *master = platform_get_drvdata(pdev);
++      struct spi_master *master = spi_master_get(platform_get_drvdata(pdev));
+       struct spi_engine *spi_engine = spi_master_get_devdata(master);
+       int irq = platform_get_irq(pdev, 0);
+ 
+@@ -561,6 +561,8 @@ static int spi_engine_remove(struct platform_device *pdev)
+ 
+       free_irq(irq, master);
+ 
++      spi_master_put(master);
++
+       writel_relaxed(0xff, spi_engine->base + SPI_ENGINE_REG_INT_PENDING);
+       writel_relaxed(0x00, spi_engine->base + SPI_ENGINE_REG_INT_ENABLE);
+       writel_relaxed(0x01, spi_engine->base + SPI_ENGINE_REG_RESET);
+diff --git a/drivers/spi/spi-sh-msiof.c b/drivers/spi/spi-sh-msiof.c
+index 1de3a772eb7d..cbf02ebb30a2 100644
+--- a/drivers/spi/spi-sh-msiof.c
++++ b/drivers/spi/spi-sh-msiof.c
+@@ -862,7 +862,7 @@ static int sh_msiof_transfer_one(struct spi_master *master,
+                               break;
+                       copy32 = copy_bswap32;
+               } else if (bits <= 16) {
+-                      if (l & 1)
++                      if (l & 3)
+                               break;
+                       copy32 = copy_wswap32;
+               } else {
+diff --git a/drivers/staging/greybus/loopback.c b/drivers/staging/greybus/loopback.c
+index 29dc249b0c74..3c2c233c2e49 100644
+--- a/drivers/staging/greybus/loopback.c
++++ b/drivers/staging/greybus/loopback.c
+@@ -1034,8 +1034,10 @@ static int gb_loopback_fn(void *data)
+                               error = gb_loopback_async_sink(gb, size);
+                       }
+ 
+-                      if (error)
++                      if (error) {
+                               gb->error++;
++                              gb->iteration_count++;
++                      }
+               } else {
+                       /* We are effectively single threaded here */
+                       if (type == GB_LOOPBACK_TYPE_PING)
+diff --git a/drivers/staging/lustre/lustre/llite/llite_mmap.c b/drivers/staging/lustre/lustre/llite/llite_mmap.c
+index 436691814a5e..27333d973bcd 100644
+--- a/drivers/staging/lustre/lustre/llite/llite_mmap.c
++++ b/drivers/staging/lustre/lustre/llite/llite_mmap.c
+@@ -401,15 +401,13 @@ static int ll_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
+               result = VM_FAULT_LOCKED;
+               break;
+       case -ENODATA:
++      case -EAGAIN:
+       case -EFAULT:
+               result = VM_FAULT_NOPAGE;
+               break;
+       case -ENOMEM:
+               result = VM_FAULT_OOM;
+               break;
+-      case -EAGAIN:
+-              result = VM_FAULT_RETRY;
+-              break;
+       default:
+               result = VM_FAULT_SIGBUS;
+               break;
+diff --git a/drivers/staging/media/cec/cec-adap.c b/drivers/staging/media/cec/cec-adap.c
+index 499d7bfe7147..75e6d5e0504f 100644
+--- a/drivers/staging/media/cec/cec-adap.c
++++ b/drivers/staging/media/cec/cec-adap.c
+@@ -608,8 +608,7 @@ int cec_transmit_msg_fh(struct cec_adapter *adap, struct cec_msg *msg,
+       }
+       memset(msg->msg + msg->len, 0, sizeof(msg->msg) - msg->len);
+       if (msg->len == 1) {
+-              if (cec_msg_initiator(msg) != 0xf ||
+-                  cec_msg_destination(msg) == 0xf) {
++              if (cec_msg_destination(msg) == 0xf) {
+                       dprintk(1, "cec_transmit_msg: invalid poll message\n");
+                       return -EINVAL;
+               }
+@@ -634,7 +633,7 @@ int cec_transmit_msg_fh(struct cec_adapter *adap, struct cec_msg *msg,
+               dprintk(1, "cec_transmit_msg: destination is the adapter itself\n");
+               return -EINVAL;
+       }
+-      if (cec_msg_initiator(msg) != 0xf &&
++      if (msg->len > 1 && adap->is_configured &&
+           !cec_has_log_addr(adap, cec_msg_initiator(msg))) {
+               dprintk(1, "cec_transmit_msg: initiator has unknown logical 
address %d\n",
+                       cec_msg_initiator(msg));
+@@ -883,7 +882,7 @@ static int cec_config_log_addr(struct cec_adapter *adap,
+ 
+       /* Send poll message */
+       msg.len = 1;
+-      msg.msg[0] = 0xf0 | log_addr;
++      msg.msg[0] = (log_addr << 4) | log_addr;
+       err = cec_transmit_msg_fh(adap, &msg, NULL, true);
+ 
+       /*
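The poll fix above relies on the CEC header layout: one byte with the initiator in bits 7:4 and the destination in bits 3:0, and a poll for a free logical address uses the same address in both nibbles. A tiny sketch:

#include <assert.h>
#include <stdint.h>

/* Sketch of the poll header byte built above: from-and-to log_addr. */
static uint8_t cec_poll_byte(uint8_t log_addr)
{
	return (uint8_t)((log_addr << 4) | log_addr);
}

int main(void)
{
	assert(cec_poll_byte(0x4) == 0x44);   /* e.g. playback device 1 */
	assert(cec_poll_byte(0xe) == 0xee);
	return 0;
}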
+diff --git a/drivers/staging/rtl8188eu/core/rtw_mlme.c b/drivers/staging/rtl8188eu/core/rtw_mlme.c
+index ee2dcd05010f..0b60d1e0333e 100644
+--- a/drivers/staging/rtl8188eu/core/rtw_mlme.c
++++ b/drivers/staging/rtl8188eu/core/rtw_mlme.c
+@@ -107,10 +107,10 @@ void rtw_free_mlme_priv_ie_data(struct mlme_priv *pmlmepriv)
+ 
+ void rtw_free_mlme_priv(struct mlme_priv *pmlmepriv)
+ {
+-      rtw_free_mlme_priv_ie_data(pmlmepriv);
+-
+-      if (pmlmepriv)
++      if (pmlmepriv) {
++              rtw_free_mlme_priv_ie_data(pmlmepriv);
+               vfree(pmlmepriv->free_bss_buf);
++      }
+ }
+ 
+ struct wlan_network *_rtw_alloc_network(struct mlme_priv *pmlmepriv)
+diff --git a/drivers/tty/serial/8250/8250_fintek.c b/drivers/tty/serial/8250/8250_fintek.c
+index f8c31070a337..2ffebb7e5ff8 100644
+--- a/drivers/tty/serial/8250/8250_fintek.c
++++ b/drivers/tty/serial/8250/8250_fintek.c
+@@ -121,7 +121,7 @@ static int fintek_8250_rs485_config(struct uart_port *port,
+ 
+       if ((!!(rs485->flags & SER_RS485_RTS_ON_SEND)) ==
+                       (!!(rs485->flags & SER_RS485_RTS_AFTER_SEND)))
+-              rs485->flags &= SER_RS485_ENABLED;
++              rs485->flags &= ~SER_RS485_ENABLED;
+       else
+               config |= RS485_URA;
+ 
+diff --git a/drivers/tty/serial/8250/8250_pci.c b/drivers/tty/serial/8250/8250_pci.c
+index 22d32d295c5b..b80ea872b039 100644
+--- a/drivers/tty/serial/8250/8250_pci.c
++++ b/drivers/tty/serial/8250/8250_pci.c
+@@ -5568,6 +5568,9 @@ static struct pci_device_id serial_pci_tbl[] = {
+       { PCI_DEVICE(0x1601, 0x0800), .driver_data = pbn_b0_4_1250000 },
+       { PCI_DEVICE(0x1601, 0xa801), .driver_data = pbn_b0_4_1250000 },
+ 
++      /* Amazon PCI serial device */
++      { PCI_DEVICE(0x1d0f, 0x8250), .driver_data = pbn_b0_1_115200 },
++
+       /*
+        * These entries match devices with class COMMUNICATION_SERIAL,
+        * COMMUNICATION_MODEM or COMMUNICATION_MULTISERIAL
+diff --git a/drivers/tty/serial/8250/8250_port.c b/drivers/tty/serial/8250/8250_port.c
+index 1ef31e3ee4a1..f6e4373a8850 100644
+--- a/drivers/tty/serial/8250/8250_port.c
++++ b/drivers/tty/serial/8250/8250_port.c
+@@ -2526,8 +2526,11 @@ static void serial8250_set_divisor(struct uart_port *port, unsigned int baud,
+       serial_dl_write(up, quot);
+ 
+       /* XR17V35x UARTs have an extra fractional divisor register (DLD) */
+-      if (up->port.type == PORT_XR17V35X)
++      if (up->port.type == PORT_XR17V35X) {
++              /* Preserve bits not related to baudrate; DLD[7:4]. */
++              quot_frac |= serial_port_in(port, 0x2) & 0xf0;
+               serial_port_out(port, 0x2, quot_frac);
++      }
+ }
+ 
+ static unsigned int serial8250_get_baud_rate(struct uart_port *port,
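The DLD change above is a read-modify-write: only the fractional-divisor nibble (bits 3:0) may change, while bits 7:4 keep whatever mode the register held. A generic sketch of the masking (not the 8250 register accessors):

#include <assert.h>
#include <stdint.h>

/* Sketch: keep DLD[7:4] as read back, replace only the fractional
 * divisor in DLD[3:0]. */
static uint8_t update_dld(uint8_t current_dld, uint8_t quot_frac)
{
	return (uint8_t)((current_dld & 0xf0) | (quot_frac & 0x0f));
}

int main(void)
{
	/* mode bits 0xa0 in DLD[7:4] survive a new fractional value of 0x7 */
	assert(update_dld(0xa3, 0x07) == 0xa7);
	return 0;
}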
+diff --git a/drivers/tty/sysrq.c b/drivers/tty/sysrq.c
+index 701c085bb19b..53cbf4ebef10 100644
+--- a/drivers/tty/sysrq.c
++++ b/drivers/tty/sysrq.c
+@@ -243,8 +243,10 @@ static void sysrq_handle_showallcpus(int key)
+        * architecture has no support for it:
+        */
+       if (!trigger_all_cpu_backtrace()) {
+-              struct pt_regs *regs = get_irq_regs();
++              struct pt_regs *regs = NULL;
+ 
++              if (in_irq())
++                      regs = get_irq_regs();
+               if (regs) {
+                       pr_info("CPU%d:\n", smp_processor_id());
+                       show_regs(regs);
+@@ -263,7 +265,10 @@ static struct sysrq_key_op sysrq_showallcpus_op = {
+ 
+ static void sysrq_handle_showregs(int key)
+ {
+-      struct pt_regs *regs = get_irq_regs();
++      struct pt_regs *regs = NULL;
++
++      if (in_irq())
++              regs = get_irq_regs();
+       if (regs)
+               show_regs(regs);
+       perf_event_print_debug();
+diff --git a/drivers/usb/core/config.c b/drivers/usb/core/config.c
+index 5008f71fb08d..5ebe04d3598b 100644
+--- a/drivers/usb/core/config.c
++++ b/drivers/usb/core/config.c
+@@ -900,14 +900,25 @@ void usb_release_bos_descriptor(struct usb_device *dev)
+       }
+ }
+ 
++static const __u8 bos_desc_len[256] = {
++      [USB_CAP_TYPE_WIRELESS_USB] = USB_DT_USB_WIRELESS_CAP_SIZE,
++      [USB_CAP_TYPE_EXT]          = USB_DT_USB_EXT_CAP_SIZE,
++      [USB_SS_CAP_TYPE]           = USB_DT_USB_SS_CAP_SIZE,
++      [USB_SSP_CAP_TYPE]          = USB_DT_USB_SSP_CAP_SIZE(1),
++      [CONTAINER_ID_TYPE]         = USB_DT_USB_SS_CONTN_ID_SIZE,
++      [USB_PTM_CAP_TYPE]          = USB_DT_USB_PTM_ID_SIZE,
++};
++
+ /* Get BOS descriptor set */
+ int usb_get_bos_descriptor(struct usb_device *dev)
+ {
+       struct device *ddev = &dev->dev;
+       struct usb_bos_descriptor *bos;
+       struct usb_dev_cap_header *cap;
++      struct usb_ssp_cap_descriptor *ssp_cap;
+       unsigned char *buffer;
+-      int length, total_len, num, i;
++      int length, total_len, num, i, ssac;
++      __u8 cap_type;
+       int ret;
+ 
+       bos = kzalloc(sizeof(struct usb_bos_descriptor), GFP_KERNEL);
+@@ -960,7 +971,13 @@ int usb_get_bos_descriptor(struct usb_device *dev)
+                       dev->bos->desc->bNumDeviceCaps = i;
+                       break;
+               }
++              cap_type = cap->bDevCapabilityType;
+               length = cap->bLength;
++              if (bos_desc_len[cap_type] && length < bos_desc_len[cap_type]) {
++                      dev->bos->desc->bNumDeviceCaps = i;
++                      break;
++              }
++
+               total_len -= length;
+ 
+               if (cap->bDescriptorType != USB_DT_DEVICE_CAPABILITY) {
+@@ -968,7 +985,7 @@ int usb_get_bos_descriptor(struct usb_device *dev)
+                       continue;
+               }
+ 
+-              switch (cap->bDevCapabilityType) {
++              switch (cap_type) {
+               case USB_CAP_TYPE_WIRELESS_USB:
+                       /* Wireless USB cap descriptor is handled by wusb */
+                       break;
+@@ -981,8 +998,11 @@ int usb_get_bos_descriptor(struct usb_device *dev)
+                               (struct usb_ss_cap_descriptor *)buffer;
+                       break;
+               case USB_SSP_CAP_TYPE:
+-                      dev->bos->ssp_cap =
+-                              (struct usb_ssp_cap_descriptor *)buffer;
++                      ssp_cap = (struct usb_ssp_cap_descriptor *)buffer;
++                      ssac = (le32_to_cpu(ssp_cap->bmAttributes) &
++                              USB_SSP_SUBLINK_SPEED_ATTRIBS) + 1;
++                      if (length >= USB_DT_USB_SSP_CAP_SIZE(ssac))
++                              dev->bos->ssp_cap = ssp_cap;
+                       break;
+               case CONTAINER_ID_TYPE:
+                       dev->bos->ss_id =
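The bos_desc_len[] table added above enables a single generic check: look up the minimum size for the capability type and stop parsing if the descriptor claims fewer bytes than its structure needs. A sketch of the table-driven validation (type codes and sizes below are illustrative, not the ch9.h constants):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

enum { CAP_TYPE_EXT = 2, CAP_TYPE_SS = 3, CAP_TYPE_MAX = 256 };

/* Illustrative minimum sizes, indexed by capability type. */
static const uint8_t min_len[CAP_TYPE_MAX] = {
	[CAP_TYPE_EXT] = 7,   /* e.g. a USB 2.0 extension descriptor */
	[CAP_TYPE_SS]  = 10,  /* e.g. a SuperSpeed capability */
};

static bool cap_len_ok(uint8_t type, uint8_t length)
{
	/* types with no table entry (0) are not length-checked here */
	return min_len[type] == 0 || length >= min_len[type];
}

int main(void)
{
	printf("%d\n", cap_len_ok(CAP_TYPE_SS, 10));  /* 1: long enough */
	printf("%d\n", cap_len_ok(CAP_TYPE_SS, 4));   /* 0: truncated */
	printf("%d\n", cap_len_ok(42, 3));            /* 1: unknown type */
	return 0;
}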
+diff --git a/drivers/usb/core/devio.c b/drivers/usb/core/devio.c
+index fa619354c5c5..893ebae51029 100644
+--- a/drivers/usb/core/devio.c
++++ b/drivers/usb/core/devio.c
+@@ -134,42 +134,38 @@ enum snoop_when {
+ #define USB_DEVICE_DEV                MKDEV(USB_DEVICE_MAJOR, 0)
+ 
+ /* Limit on the total amount of memory we can allocate for transfers */
+-static unsigned usbfs_memory_mb = 16;
++static u32 usbfs_memory_mb = 16;
+ module_param(usbfs_memory_mb, uint, 0644);
+ MODULE_PARM_DESC(usbfs_memory_mb,
+               "maximum MB allowed for usbfs buffers (0 = no limit)");
+ 
+ /* Hard limit, necessary to avoid arithmetic overflow */
+-#define USBFS_XFER_MAX                (UINT_MAX / 2 - 1000000)
++#define USBFS_XFER_MAX         (UINT_MAX / 2 - 1000000)
+ 
+-static atomic_t usbfs_memory_usage;   /* Total memory currently allocated */
++static atomic64_t usbfs_memory_usage; /* Total memory currently allocated */
+ 
+ /* Check whether it's okay to allocate more memory for a transfer */
+-static int usbfs_increase_memory_usage(unsigned amount)
++static int usbfs_increase_memory_usage(u64 amount)
+ {
+-      unsigned lim;
++      u64 lim;
+ 
+-      /*
+-       * Convert usbfs_memory_mb to bytes, avoiding overflows.
+-       * 0 means use the hard limit (effectively unlimited).
+-       */
+       lim = ACCESS_ONCE(usbfs_memory_mb);
+-      if (lim == 0 || lim > (USBFS_XFER_MAX >> 20))
+-              lim = USBFS_XFER_MAX;
+-      else
+-              lim <<= 20;
++      lim <<= 20;
+ 
+-      atomic_add(amount, &usbfs_memory_usage);
+-      if (atomic_read(&usbfs_memory_usage) <= lim)
+-              return 0;
+-      atomic_sub(amount, &usbfs_memory_usage);
+-      return -ENOMEM;
++      atomic64_add(amount, &usbfs_memory_usage);
++
++      if (lim > 0 && atomic64_read(&usbfs_memory_usage) > lim) {
++              atomic64_sub(amount, &usbfs_memory_usage);
++              return -ENOMEM;
++      }
++
++      return 0;
+ }
+ 
+ /* Memory for a transfer is being deallocated */
+-static void usbfs_decrease_memory_usage(unsigned amount)
++static void usbfs_decrease_memory_usage(u64 amount)
+ {
+-      atomic_sub(amount, &usbfs_memory_usage);
++      atomic64_sub(amount, &usbfs_memory_usage);
+ }
+ 
+ static int connected(struct usb_dev_state *ps)
+@@ -1191,7 +1187,7 @@ static int proc_bulk(struct usb_dev_state *ps, void __user *arg)
+       if (!usb_maxpacket(dev, pipe, !(bulk.ep & USB_DIR_IN)))
+               return -EINVAL;
+       len1 = bulk.len;
+-      if (len1 >= USBFS_XFER_MAX)
++      if (len1 >= (INT_MAX - sizeof(struct urb)))
+               return -EINVAL;
+       ret = usbfs_increase_memory_usage(len1 + sizeof(struct urb));
+       if (ret)
+@@ -1458,13 +1454,19 @@ static int proc_do_submiturb(struct usb_dev_state *ps, struct usbdevfs_urb *uurb
+       int number_of_packets = 0;
+       unsigned int stream_id = 0;
+       void *buf;
+-
+-      if (uurb->flags & ~(USBDEVFS_URB_ISO_ASAP |
+-                              USBDEVFS_URB_SHORT_NOT_OK |
++      unsigned long mask =    USBDEVFS_URB_SHORT_NOT_OK |
+                               USBDEVFS_URB_BULK_CONTINUATION |
+                               USBDEVFS_URB_NO_FSBR |
+                               USBDEVFS_URB_ZERO_PACKET |
+-                              USBDEVFS_URB_NO_INTERRUPT))
++                              USBDEVFS_URB_NO_INTERRUPT;
++      /* USBDEVFS_URB_ISO_ASAP is a special case */
++      if (uurb->type == USBDEVFS_URB_TYPE_ISO)
++              mask |= USBDEVFS_URB_ISO_ASAP;
++
++      if (uurb->flags & ~mask)
++                      return -EINVAL;
++
++      if ((unsigned int)uurb->buffer_length >= USBFS_XFER_MAX)
+               return -EINVAL;
+       if (uurb->buffer_length > 0 && !uurb->buffer)
+               return -EINVAL;
+@@ -1584,10 +1586,6 @@ static int proc_do_submiturb(struct usb_dev_state *ps, struct usbdevfs_urb *uurb
+               return -EINVAL;
+       }
+ 
+-      if (uurb->buffer_length >= USBFS_XFER_MAX) {
+-              ret = -EINVAL;
+-              goto error;
+-      }
+       if (uurb->buffer_length > 0 &&
+                       !access_ok(is_in ? VERIFY_WRITE : VERIFY_READ,
+                               uurb->buffer, uurb->buffer_length)) {
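The devio rework above turns the usbfs accounting into a 64-bit optimistic charge-then-check: add the request size, compare the running total against the configured cap, and back the charge out on failure. A minimal C11 sketch of the same pattern (plain atomics standing in for the kernel's atomic64_t):

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

static _Atomic int64_t usage;       /* bytes currently charged */
static int64_t limit_mb = 16;       /* 0 means "no limit" */

static int increase_usage(int64_t amount)
{
	int64_t lim = limit_mb << 20;   /* MB -> bytes, safe in 64 bits */

	atomic_fetch_add(&usage, amount);
	if (lim > 0 && atomic_load(&usage) > lim) {
		atomic_fetch_sub(&usage, amount);  /* refund on failure */
		return -1;                         /* -ENOMEM in the kernel */
	}
	return 0;
}

static void decrease_usage(int64_t amount)
{
	atomic_fetch_sub(&usage, amount);
}

int main(void)
{
	printf("%d\n", increase_usage(8 << 20));    /* 0: within 16 MB cap */
	printf("%d\n", increase_usage(16 << 20));   /* -1: would exceed cap */
	decrease_usage(8 << 20);
	return 0;
}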
+diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
+index 706b3d6a7614..d0d3f9ef9f10 100644
+--- a/drivers/usb/core/hub.c
++++ b/drivers/usb/core/hub.c
+@@ -4925,6 +4925,15 @@ static void hub_port_connect(struct usb_hub *hub, int port1, u16 portstatus,
+               usb_put_dev(udev);
+               if ((status == -ENOTCONN) || (status == -ENOTSUPP))
+                       break;
++
++              /* When halfway through our retry count, power-cycle the port */
++              if (i == (SET_CONFIG_TRIES / 2) - 1) {
++                      dev_info(&port_dev->dev, "attempt power cycle\n");
++                      usb_hub_set_port_power(hdev, hub, port1, false);
++                      msleep(2 * hub_power_on_good_delay(hub));
++                      usb_hub_set_port_power(hdev, hub, port1, true);
++                      msleep(hub_power_on_good_delay(hub));
++              }
+       }
+       if (hub->hdev->parent ||
+                       !hcd->driver->port_handed_over ||
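The hunk above fires the power cycle exactly once, just before the second half of the enumeration retries. A sketch of where that trigger lands in the loop (SET_CONFIG_TRIES is 4 here only for illustration; the real value depends on the kernel's retry scheme):

#include <stdio.h>

#define SET_CONFIG_TRIES 4   /* illustrative value only */

int main(void)
{
	int i;

	for (i = 0; i < SET_CONFIG_TRIES; i++) {
		printf("enumeration attempt %d\n", i);
		if (i == (SET_CONFIG_TRIES / 2) - 1)
			printf("  -> power-cycle the port before attempt %d\n",
			       i + 1);
	}
	return 0;
}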
+diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c
+index 37c418e581fb..50010282c010 100644
+--- a/drivers/usb/core/quirks.c
++++ b/drivers/usb/core/quirks.c
+@@ -151,6 +151,9 @@ static const struct usb_device_id usb_quirk_list[] = {
+       /* appletouch */
+       { USB_DEVICE(0x05ac, 0x021a), .driver_info = USB_QUIRK_RESET_RESUME },
+ 
++      /* Genesys Logic hub, internally used by KY-688 USB 3.1 Type-C Hub */
++      { USB_DEVICE(0x05e3, 0x0612), .driver_info = USB_QUIRK_NO_LPM },
++
+       /* Genesys Logic hub, internally used by Moshi USB to Ethernet Adapter */
+       { USB_DEVICE(0x05e3, 0x0616), .driver_info = USB_QUIRK_NO_LPM },
+ 
+diff --git a/drivers/usb/gadget/function/f_fs.c b/drivers/usb/gadget/function/f_fs.c
+index 273320fa30ae..4fce83266926 100644
+--- a/drivers/usb/gadget/function/f_fs.c
++++ b/drivers/usb/gadget/function/f_fs.c
+@@ -2263,7 +2263,7 @@ static int __ffs_data_do_os_desc(enum ffs_os_desc_type type,
+ 
+               if (len < sizeof(*d) ||
+                   d->bFirstInterfaceNumber >= ffs->interfaces_count ||
+-                  !d->Reserved1)
++                  d->Reserved1)
+                       return -EINVAL;
+               for (i = 0; i < ARRAY_SIZE(d->Reserved2); ++i)
+                       if (d->Reserved2[i])
+diff --git a/drivers/usb/host/ehci-dbg.c b/drivers/usb/host/ehci-dbg.c
+index 1a2614aae42c..3ff6468a1f5f 100644
+--- a/drivers/usb/host/ehci-dbg.c
++++ b/drivers/usb/host/ehci-dbg.c
+@@ -837,7 +837,7 @@ static ssize_t fill_registers_buffer(struct debug_buffer *buf)
+                       default:                /* unknown */
+                               break;
+                       }
+-                      temp = (cap >> 8) & 0xff;
++                      offset = (cap >> 8) & 0xff;
+               }
+       }
+ #endif
+diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c
+index b7114c3f52aa..a3ecd8bd5324 100644
+--- a/drivers/usb/host/xhci-mem.c
++++ b/drivers/usb/host/xhci-mem.c
+@@ -996,6 +996,12 @@ void xhci_free_virt_devices_depth_first(struct xhci_hcd *xhci, int slot_id)
+       if (!vdev)
+               return;
+ 
++      if (vdev->real_port == 0 ||
++                      vdev->real_port > HCS_MAX_PORTS(xhci->hcs_params1)) {
++              xhci_dbg(xhci, "Bad vdev->real_port.\n");
++              goto out;
++      }
++
+       tt_list_head = &(xhci->rh_bw[vdev->real_port - 1].tts);
+       list_for_each_entry_safe(tt_info, next, tt_list_head, tt_list) {
+               /* is this a hub device that added a tt_info to the tts list */
+@@ -1009,6 +1015,7 @@ void xhci_free_virt_devices_depth_first(struct xhci_hcd *xhci, int slot_id)
+                       }
+               }
+       }
++out:
+       /* we are now at a leaf device */
+       xhci_free_virt_device(xhci, slot_id);
+ }
+diff --git a/drivers/usb/phy/phy-tahvo.c b/drivers/usb/phy/phy-tahvo.c
+index ab5d364f6e8c..335a1ef35224 100644
+--- a/drivers/usb/phy/phy-tahvo.c
++++ b/drivers/usb/phy/phy-tahvo.c
+@@ -368,7 +368,8 @@ static int tahvo_usb_probe(struct platform_device *pdev)
+       tu->extcon = devm_extcon_dev_allocate(&pdev->dev, tahvo_cable);
+       if (IS_ERR(tu->extcon)) {
+               dev_err(&pdev->dev, "failed to allocate memory for extcon\n");
+-              return -ENOMEM;
++              ret = PTR_ERR(tu->extcon);
++              goto err_disable_clk;
+       }
+ 
+       ret = devm_extcon_dev_register(&pdev->dev, tu->extcon);
+diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
+index db3d34c2c82e..ffa8ec917ff5 100644
+--- a/drivers/usb/serial/option.c
++++ b/drivers/usb/serial/option.c
+@@ -241,6 +241,7 @@ static void option_instat_callback(struct urb *urb);
+ /* These Quectel products use Quectel's vendor ID */
+ #define QUECTEL_PRODUCT_EC21                  0x0121
+ #define QUECTEL_PRODUCT_EC25                  0x0125
++#define QUECTEL_PRODUCT_BG96                  0x0296
+ 
+ #define CMOTECH_VENDOR_ID                     0x16d8
+ #define CMOTECH_PRODUCT_6001                  0x6001
+@@ -1185,6 +1186,8 @@ static const struct usb_device_id option_ids[] = {
+         .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
+       { USB_DEVICE(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EC25),
+         .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
++      { USB_DEVICE(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_BG96),
++        .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
+       { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6001) },
+       { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CMU_300) },
+       { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6003),
+diff --git a/drivers/usb/storage/uas-detect.h b/drivers/usb/storage/uas-detect.h
+index a155cd02bce2..ecc83c405a8b 100644
+--- a/drivers/usb/storage/uas-detect.h
++++ b/drivers/usb/storage/uas-detect.h
+@@ -111,6 +111,10 @@ static int uas_use_uas_driver(struct usb_interface *intf,
+               }
+       }
+ 
++      /* All Seagate disk enclosures have broken ATA pass-through support */
++      if (le16_to_cpu(udev->descriptor.idVendor) == 0x0bc2)
++              flags |= US_FL_NO_ATA_1X;
++
+       usb_stor_adjust_quirks(udev, &flags);
+ 
+       if (flags & US_FL_IGNORE_UAS) {
+diff --git a/drivers/vfio/vfio_iommu_spapr_tce.c b/drivers/vfio/vfio_iommu_spapr_tce.c
+index 85d3e648bdea..59b3f62a2d64 100644
+--- a/drivers/vfio/vfio_iommu_spapr_tce.c
++++ b/drivers/vfio/vfio_iommu_spapr_tce.c
+@@ -1123,12 +1123,11 @@ static long tce_iommu_ioctl(void *iommu_data,
+               mutex_lock(&container->lock);
+ 
+               ret = tce_iommu_create_default_window(container);
+-              if (ret)
+-                      return ret;
+-
+-              ret = tce_iommu_create_window(container, create.page_shift,
+-                              create.window_size, create.levels,
+-                              &create.start_addr);
++              if (!ret)
++                      ret = tce_iommu_create_window(container,
++                                      create.page_shift,
++                                      create.window_size, create.levels,
++                                      &create.start_addr);
+ 
+               mutex_unlock(&container->lock);
+ 
+diff --git a/fs/dax.c b/fs/dax.c
+index bf6218da7928..800748f10b3d 100644
+--- a/fs/dax.c
++++ b/fs/dax.c
+@@ -1265,6 +1265,17 @@ iomap_dax_actor(struct inode *inode, loff_t pos, loff_t length, void *data,
+       if (WARN_ON_ONCE(iomap->type != IOMAP_MAPPED))
+               return -EIO;
+ 
++      /*
++       * Write can allocate block for an area which has a hole page mapped
++       * into page tables. We have to tear down these mappings so that data
++       * written by write(2) is visible in mmap.
++       */
++      if ((iomap->flags & IOMAP_F_NEW) && inode->i_mapping->nrpages) {
++              invalidate_inode_pages2_range(inode->i_mapping,
++                                            pos >> PAGE_SHIFT,
++                                            (end - 1) >> PAGE_SHIFT);
++      }
++
+       while (pos < end) {
+               unsigned offset = pos & (PAGE_SIZE - 1);
+               struct blk_dax_ctl dax = { 0 };
+@@ -1329,23 +1340,6 @@ iomap_dax_rw(struct kiocb *iocb, struct iov_iter *iter,
+       if (iov_iter_rw(iter) == WRITE)
+               flags |= IOMAP_WRITE;
+ 
+-      /*
+-       * Yes, even DAX files can have page cache attached to them:  A zeroed
+-       * page is inserted into the pagecache when we have to serve a write
+-       * fault on a hole.  It should never be dirtied and can simply be
+-       * dropped from the pagecache once we get real data for the page.
+-       *
+-       * XXX: This is racy against mmap, and there's nothing we can do about
+-       * it. We'll eventually need to shift this down even further so that
+-       * we can check if we allocated blocks over a hole first.
+-       */
+-      if (mapping->nrpages) {
+-              ret = invalidate_inode_pages2_range(mapping,
+-                              pos >> PAGE_SHIFT,
+-                              (pos + iov_iter_count(iter) - 1) >> PAGE_SHIFT);
+-              WARN_ON_ONCE(ret);
+-      }
+-
+       while (iov_iter_count(iter)) {
+               ret = iomap_apply(inode, pos, iov_iter_count(iter), flags, ops,
+                               iter, iomap_dax_actor);
+diff --git a/fs/libfs.c b/fs/libfs.c
+index 48826d4da189..9588780ad43e 100644
+--- a/fs/libfs.c
++++ b/fs/libfs.c
+@@ -245,7 +245,8 @@ struct dentry *mount_pseudo_xattr(struct file_system_type *fs_type, char *name,
+       struct inode *root;
+       struct qstr d_name = QSTR_INIT(name, strlen(name));
+ 
+-      s = sget(fs_type, NULL, set_anon_super, MS_NOUSER, NULL);
++      s = sget_userns(fs_type, NULL, set_anon_super, MS_KERNMOUNT|MS_NOUSER,
++                      &init_user_ns, NULL);
+       if (IS_ERR(s))
+               return ERR_CAST(s);
+ 
+diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
+index 67845220fc27..4638654e26f3 100644
+--- a/fs/nfs/nfs4proc.c
++++ b/fs/nfs/nfs4proc.c
+@@ -38,7 +38,6 @@
+ #include <linux/mm.h>
+ #include <linux/delay.h>
+ #include <linux/errno.h>
+-#include <linux/file.h>
+ #include <linux/string.h>
+ #include <linux/ratelimit.h>
+ #include <linux/printk.h>
+@@ -6006,7 +6005,6 @@ static struct nfs4_lockdata *nfs4_alloc_lockdata(struct file_lock *fl,
+       p->server = server;
+       atomic_inc(&lsp->ls_count);
+       p->ctx = get_nfs_open_context(ctx);
+-      get_file(fl->fl_file);
+       memcpy(&p->fl, fl, sizeof(p->fl));
+       return p;
+ out_free_seqid:
+@@ -6119,7 +6117,6 @@ static void nfs4_lock_release(void *calldata)
+               nfs_free_seqid(data->arg.lock_seqid);
+       nfs4_put_lock_state(data->lsp);
+       put_nfs_open_context(data->ctx);
+-      fput(data->fl.fl_file);
+       kfree(data);
+       dprintk("%s: done!\n", __func__);
+ }
+diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c
+index 92671914067f..71deeae6eefd 100644
+--- a/fs/nfs/nfs4state.c
++++ b/fs/nfs/nfs4state.c
+@@ -1718,7 +1718,6 @@ static int nfs4_recovery_handle_error(struct nfs_client *clp, int error)
+                       break;
+               case -NFS4ERR_STALE_CLIENTID:
+                       set_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state);
+-                      nfs4_state_clear_reclaim_reboot(clp);
+                       nfs4_state_start_reclaim_reboot(clp);
+                       break;
+               case -NFS4ERR_EXPIRED:
+diff --git a/include/linux/buffer_head.h b/include/linux/buffer_head.h
+index 447a915db25d..4431ea2c8802 100644
+--- a/include/linux/buffer_head.h
++++ b/include/linux/buffer_head.h
+@@ -239,12 +239,10 @@ static inline int block_page_mkwrite_return(int err)
+ {
+       if (err == 0)
+               return VM_FAULT_LOCKED;
+-      if (err == -EFAULT)
++      if (err == -EFAULT || err == -EAGAIN)
+               return VM_FAULT_NOPAGE;
+       if (err == -ENOMEM)
+               return VM_FAULT_OOM;
+-      if (err == -EAGAIN)
+-              return VM_FAULT_RETRY;
+       /* -ENOSPC, -EDQUOT, -EIO ... */
+       return VM_FAULT_SIGBUS;
+ }
+diff --git a/include/linux/fence.h b/include/linux/fence.h
+index 0d763053f97a..9bb2c0c97a21 100644
+--- a/include/linux/fence.h
++++ b/include/linux/fence.h
+@@ -47,7 +47,7 @@ struct fence_cb;
+  * can be compared to decide which fence would be signaled later.
+  * @flags: A mask of FENCE_FLAG_* defined below
+  * @timestamp: Timestamp when the fence was signaled.
+- * @status: Optional, only valid if < 0, must be set before calling
++ * @error: Optional, only valid if < 0, must be set before calling
+  * fence_signal, indicates that the fence has completed with an error.
+  *
+  * the flags member must be manipulated and read using the appropriate
+@@ -79,7 +79,7 @@ struct fence {
+       unsigned seqno;
+       unsigned long flags;
+       ktime_t timestamp;
+-      int status;
++      int error;
+ };
+ 
+ enum fence_flag_bits {
+@@ -132,7 +132,7 @@ struct fence_cb {
+  * or some failure occurred that made it impossible to enable
+  * signaling. True indicates successful enabling.
+  *
+- * fence->status may be set in enable_signaling, but only when false is
++ * fence->error may be set in enable_signaling, but only when false is
+  * returned.
+  *
+  * Calling fence_signal before enable_signaling is called allows
+@@ -144,7 +144,7 @@ struct fence_cb {
+  * the second time will be a noop since it was already signaled.
+  *
+  * Notes on signaled:
+- * May set fence->status if returning true.
++ * May set fence->error if returning true.
+  *
+  * Notes on wait:
+  * Must not be NULL, set to fence_default_wait for default implementation.
+@@ -280,6 +280,19 @@ fence_is_signaled(struct fence *fence)
+       return false;
+ }
+ 
++/**
++ * __fence_is_later - return if f1 is chronologically later than f2
++ * @f1:       [in]    the first fence's seqno
++ * @f2:       [in]    the second fence's seqno from the same context
++ *
++ * Returns true if f1 is chronologically later than f2. Both fences must be
++ * from the same context, since a seqno is not common across contexts.
++ */
++static inline bool __fence_is_later(u32 f1, u32 f2)
++{
++      return (int)(f1 - f2) > 0;
++}
++
+ /**
+  * fence_is_later - return if f1 is chronologically later than f2
+  * @f1:       [in]    the first fence from the same context
+@@ -293,7 +306,7 @@ static inline bool fence_is_later(struct fence *f1, struct fence *f2)
+       if (WARN_ON(f1->context != f2->context))
+               return false;
+ 
+-      return (int)(f1->seqno - f2->seqno) > 0;
++      return __fence_is_later(f1->seqno, f2->seqno);
+ }
+ 
+ /**
+@@ -321,6 +334,50 @@ static inline struct fence *fence_later(struct fence *f1, struct fence *f2)
+               return fence_is_signaled(f2) ? NULL : f2;
+ }
+ 
++/**
++ * fence_get_status_locked - returns the status upon completion
++ * @fence: [in]       the fence to query
++ *
++ * Drivers can supply an optional error status condition before they signal
++ * the fence (to indicate whether the fence was completed due to an error
++ * rather than success). The value of the status condition is only valid
++ * if the fence has been signaled, fence_get_status_locked() first checks
++ * the signal state before reporting the error status.
++ *
++ * Returns 0 if the fence has not yet been signaled, 1 if the fence has
++ * been signaled without an error condition, or a negative error code
++ * if the fence has been completed in err.
++ */
++static inline int fence_get_status_locked(struct fence *fence)
++{
++      if (fence_is_signaled_locked(fence))
++              return fence->error ?: 1;
++      else
++              return 0;
++}
++
++int fence_get_status(struct fence *fence);
++
++/**
++ * fence_set_error - flag an error condition on the fence
++ * @fence: [in]       the fence
++ * @error: [in]       the error to store
++ *
++ * Drivers can supply an optional error status condition before they signal
++ * the fence, to indicate that the fence was completed due to an error
++ * rather than success. This must be set before signaling (so that the value
++ * is visible before any waiters on the signal callback are woken). This
++ * helper exists to help catch erroneous setting of #fence.error.
++ */
++static inline void fence_set_error(struct fence *fence,
++                                     int error)
++{
++      BUG_ON(test_bit(FENCE_FLAG_SIGNALED_BIT, &fence->flags));
++      BUG_ON(error >= 0 || error < -MAX_ERRNO);
++
++      fence->error = error;
++}
++
+ signed long fence_wait_timeout(struct fence *, bool intr, signed long timeout);
+ signed long fence_wait_any_timeout(struct fence **fences, uint32_t count,
+                                  bool intr, signed long timeout);
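__fence_is_later() above uses the classic wrap-safe sequence comparison: the unsigned difference cast to a signed int orders two seqnos correctly as long as they are less than 2^31 apart, even across the 32-bit wraparound. A standalone check:

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

/* Sketch: the comparison __fence_is_later() performs. */
static bool is_later(uint32_t f1, uint32_t f2)
{
	return (int32_t)(f1 - f2) > 0;
}

int main(void)
{
	assert(is_later(5, 3));                    /* plain ordering */
	assert(!is_later(3, 5));
	assert(is_later(2, 0xfffffffeu));          /* 2 is "after" the wrap */
	assert(!is_later(0xfffffffeu, 2));
	assert(!is_later(7, 7));                   /* equal: not later */
	return 0;
}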
+diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
+index 4741ecdb9817..78ed8105e64d 100644
+--- a/include/linux/perf_event.h
++++ b/include/linux/perf_event.h
+@@ -1259,6 +1259,7 @@ extern void perf_event_disable(struct perf_event *event);
+ extern void perf_event_disable_local(struct perf_event *event);
+ extern void perf_event_disable_inatomic(struct perf_event *event);
+ extern void perf_event_task_tick(void);
++extern int perf_event_account_interrupt(struct perf_event *event);
+ #else /* !CONFIG_PERF_EVENTS: */
+ static inline void *
+ perf_aux_output_begin(struct perf_output_handle *handle,
+diff --git a/include/uapi/linux/usb/ch9.h b/include/uapi/linux/usb/ch9.h
+index 5e64a86989a5..ab1dadba9923 100644
+--- a/include/uapi/linux/usb/ch9.h
++++ b/include/uapi/linux/usb/ch9.h
+@@ -854,6 +854,8 @@ struct usb_wireless_cap_descriptor {       /* Ultra Wide Band */
+       __u8  bReserved;
+ } __attribute__((packed));
+ 
++#define USB_DT_USB_WIRELESS_CAP_SIZE  11
++
+ /* USB 2.0 Extension descriptor */
+ #define       USB_CAP_TYPE_EXT                2
+ 
+@@ -1046,6 +1048,7 @@ struct usb_ptm_cap_descriptor {
+       __u8  bDevCapabilityType;
+ } __attribute__((packed));
+ 
++#define USB_DT_USB_PTM_ID_SIZE                3
+ /*
+  * The size of the descriptor for the Sublink Speed Attribute Count
+  * (SSAC) specified in bmAttributes[4:0].
+diff --git a/kernel/events/core.c b/kernel/events/core.c
+index 36ff2d93f222..13b9784427b0 100644
+--- a/kernel/events/core.c
++++ b/kernel/events/core.c
+@@ -7088,25 +7088,12 @@ static void perf_log_itrace_start(struct perf_event *event)
+       perf_output_end(&handle);
+ }
+ 
+-/*
+- * Generic event overflow handling, sampling.
+- */
+-
+-static int __perf_event_overflow(struct perf_event *event,
+-                                 int throttle, struct perf_sample_data *data,
+-                                 struct pt_regs *regs)
++static int
++__perf_event_account_interrupt(struct perf_event *event, int throttle)
+ {
+-      int events = atomic_read(&event->event_limit);
+       struct hw_perf_event *hwc = &event->hw;
+-      u64 seq;
+       int ret = 0;
+-
+-      /*
+-       * Non-sampling counters might still use the PMI to fold short
+-       * hardware counters, ignore those.
+-       */
+-      if (unlikely(!is_sampling_event(event)))
+-              return 0;
++      u64 seq;
+ 
+       seq = __this_cpu_read(perf_throttled_seq);
+       if (seq != hwc->interrupts_seq) {
+@@ -7134,6 +7121,34 @@ static int __perf_event_overflow(struct perf_event *event,
+                       perf_adjust_period(event, delta, hwc->last_period, true);
+       }
+ 
++      return ret;
++}
++
++int perf_event_account_interrupt(struct perf_event *event)
++{
++      return __perf_event_account_interrupt(event, 1);
++}
++
++/*
++ * Generic event overflow handling, sampling.
++ */
++
++static int __perf_event_overflow(struct perf_event *event,
++                                 int throttle, struct perf_sample_data *data,
++                                 struct pt_regs *regs)
++{
++      int events = atomic_read(&event->event_limit);
++      int ret = 0;
++
++      /*
++       * Non-sampling counters might still use the PMI to fold short
++       * hardware counters, ignore those.
++       */
++      if (unlikely(!is_sampling_event(event)))
++              return 0;
++
++      ret = __perf_event_account_interrupt(event, throttle);
++
+       /*
+        * XXX event_limit might not quite work as expected on inherited
+        * events
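The refactor above extracts the throttling bookkeeping out of __perf_event_overflow() so that paths other than sample overflow can account an interrupt as well. A rough userspace sketch of that extract-function shape (all names hypothetical, with a fixed throttle threshold assumed in place of the kernel's sysctl):

#include <stdio.h>

#define MAX_INTERRUPTS 100 /* assumed stand-in for the kernel's limit */

struct fake_event {
        int interrupts;
};

/* The extracted accounting step, callable on its own as well as from
 * the overflow path, mirroring perf_event_account_interrupt(). */
static int account_interrupt(struct fake_event *e, int throttle)
{
        e->interrupts++;
        if (throttle && e->interrupts >= MAX_INTERRUPTS)
                return 1; /* caller should throttle the event */
        return 0;
}

static int handle_overflow(struct fake_event *e)
{
        /* Overflow handling now delegates the shared bookkeeping. */
        return account_interrupt(e, 1);
}

int main(void)
{
        struct fake_event e = { 0 };
        int throttled = 0;
        for (int i = 0; i < 150; i++)
                throttled |= handle_overflow(&e);
        printf("throttled=%d after %d interrupts\n", throttled, e.interrupts);
        return 0;
}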
+diff --git a/mm/oom_kill.c b/mm/oom_kill.c
+index d631d251c150..4a184157cc3d 100644
+--- a/mm/oom_kill.c
++++ b/mm/oom_kill.c
+@@ -524,7 +524,6 @@ static bool __oom_reap_task_mm(struct task_struct *tsk, struct mm_struct *mm)
+        */
+       set_bit(MMF_UNSTABLE, &mm->flags);
+ 
+-      tlb_gather_mmu(&tlb, mm, 0, -1);
+       for (vma = mm->mmap ; vma; vma = vma->vm_next) {
+               if (is_vm_hugetlb_page(vma))
+                       continue;
+@@ -546,11 +545,13 @@ static bool __oom_reap_task_mm(struct task_struct *tsk, struct mm_struct *mm)
+                * we do not want to block exit_mmap by keeping mm ref
+                * count elevated without a good reason.
+                */
+-              if (vma_is_anonymous(vma) || !(vma->vm_flags & VM_SHARED))
++              if (vma_is_anonymous(vma) || !(vma->vm_flags & VM_SHARED)) {
++                      tlb_gather_mmu(&tlb, mm, vma->vm_start, vma->vm_end);
+                       unmap_page_range(&tlb, vma, vma->vm_start, vma->vm_end,
+                                        &details);
++                      tlb_finish_mmu(&tlb, vma->vm_start, vma->vm_end);
++              }
+       }
+-      tlb_finish_mmu(&tlb, 0, -1);
+       pr_info("oom_reaper: reaped process %d (%s), now anon-rss:%lukB, file-rss:%lukB, shmem-rss:%lukB\n",
+                       task_pid_nr(tsk), tsk->comm,
+                       K(get_mm_counter(mm, MM_ANONPAGES)),
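The hunk above narrows the mmu_gather window from the whole address space (0 to -1) to each reaped VMA, opening and closing the batch only around ranges that are actually unmapped. The same begin/flush-per-range shape in a plain C sketch, with hypothetical stand-ins for tlb_gather_mmu()/tlb_finish_mmu():

#include <stdio.h>

struct range { unsigned long start, end; int shared; };

/* Hypothetical batching hooks standing in for the kernel's mmu_gather. */
static void batch_begin(unsigned long s, unsigned long e)
{
        printf("gather [%lx, %lx)\n", s, e);
}

static void batch_end(unsigned long s, unsigned long e)
{
        printf("flush  [%lx, %lx)\n", s, e);
}

int main(void)
{
        struct range vmas[] = {
                { 0x1000, 0x2000, 0 },
                { 0x3000, 0x4000, 1 }, /* shared: skipped, no batch opened */
                { 0x5000, 0x9000, 0 },
        };

        /* Per-range batching, as in the patched __oom_reap_task_mm(). */
        for (unsigned int i = 0; i < sizeof(vmas) / sizeof(vmas[0]); i++) {
                if (vmas[i].shared)
                        continue;
                batch_begin(vmas[i].start, vmas[i].end);
                /* ... the unmap_page_range() equivalent would run here ... */
                batch_end(vmas[i].start, vmas[i].end);
        }
        return 0;
}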
+diff --git a/mm/page_alloc.c b/mm/page_alloc.c
+index ef5ee56095e8..fbc38888252b 100644
+--- a/mm/page_alloc.c
++++ b/mm/page_alloc.c
+@@ -2592,30 +2592,23 @@ int __isolate_free_page(struct page *page, unsigned int order)
+  * Update NUMA hit/miss statistics
+  *
+  * Must be called with interrupts disabled.
+- *
+- * When __GFP_OTHER_NODE is set assume the node of the preferred
+- * zone is the local node. This is useful for daemons who allocate
+- * memory on behalf of other processes.
+  */
+ static inline void zone_statistics(struct zone *preferred_zone, struct zone *z,
+                                                               gfp_t flags)
+ {
+ #ifdef CONFIG_NUMA
+-      int local_nid = numa_node_id();
+       enum zone_stat_item local_stat = NUMA_LOCAL;
+ 
+-      if (unlikely(flags & __GFP_OTHER_NODE)) {
++      if (z->node != numa_node_id())
+               local_stat = NUMA_OTHER;
+-              local_nid = preferred_zone->node;
+-      }
+ 
+-      if (z->node == local_nid) {
++      if (z->node == preferred_zone->node)
+               __inc_zone_state(z, NUMA_HIT);
+-              __inc_zone_state(z, local_stat);
+-      } else {
++      else {
+               __inc_zone_state(z, NUMA_MISS);
+               __inc_zone_state(preferred_zone, NUMA_FOREIGN);
+       }
++      __inc_zone_state(z, local_stat);
+ #endif
+ }
+ 
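After this change, NUMA_LOCAL vs. NUMA_OTHER is decided purely by whether the allocating zone sits on the running CPU's node, and one of the two is bumped on every path, while hit/miss still compares against the preferred zone. A small standalone sketch of the resulting counter logic (hypothetical names, nodes reduced to plain ints):

#include <stdio.h>

enum { NUMA_HIT, NUMA_MISS, NUMA_FOREIGN, NUMA_LOCAL, NUMA_OTHER, NR_STATS };

static int stats[NR_STATS];

/* Mirrors the patched zone_statistics(): hit/miss compares the zone's
 * node against the preferred zone's node; local/other compares it
 * against the node the caller is running on. */
static void fake_zone_statistics(int preferred_node, int z_node, int local_node)
{
        stats[z_node == local_node ? NUMA_LOCAL : NUMA_OTHER]++;
        if (z_node == preferred_node) {
                stats[NUMA_HIT]++;
        } else {
                stats[NUMA_MISS]++;
                stats[NUMA_FOREIGN]++; /* the kernel charges this to the preferred zone */
        }
}

int main(void)
{
        fake_zone_statistics(0, 0, 0); /* hit, local  */
        fake_zone_statistics(0, 1, 0); /* miss, other */
        fake_zone_statistics(1, 1, 0); /* hit, other  */
        printf("hit=%d miss=%d foreign=%d local=%d other=%d\n",
               stats[NUMA_HIT], stats[NUMA_MISS], stats[NUMA_FOREIGN],
               stats[NUMA_LOCAL], stats[NUMA_OTHER]);
        return 0;
}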
+diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
+index 8fcd0c642742..05255a286888 100644
+--- a/net/ipv4/tcp_input.c
++++ b/net/ipv4/tcp_input.c
+@@ -5081,7 +5081,7 @@ static void tcp_check_space(struct sock *sk)
+       if (sock_flag(sk, SOCK_QUEUE_SHRUNK)) {
+               sock_reset_flag(sk, SOCK_QUEUE_SHRUNK);
+               /* pairs with tcp_poll() */
+-              smp_mb__after_atomic();
++              smp_mb();
+               if (sk->sk_socket &&
+                   test_bit(SOCK_NOSPACE, &sk->sk_socket->flags))
+                       tcp_new_space(sk);
+diff --git a/net/ipv6/ip6_vti.c b/net/ipv6/ip6_vti.c
+index 816f79d1a8a3..67e882d49195 100644
+--- a/net/ipv6/ip6_vti.c
++++ b/net/ipv6/ip6_vti.c
+@@ -189,12 +189,12 @@ static int vti6_tnl_create2(struct net_device *dev)
+       struct vti6_net *ip6n = net_generic(net, vti6_net_id);
+       int err;
+ 
++      dev->rtnl_link_ops = &vti6_link_ops;
+       err = register_netdevice(dev);
+       if (err < 0)
+               goto out;
+ 
+       strcpy(t->parms.name, dev->name);
+-      dev->rtnl_link_ops = &vti6_link_ops;
+ 
+       dev_hold(dev);
+       vti6_tnl_link(ip6n, t);
+diff --git a/net/l2tp/l2tp_ip.c b/net/l2tp/l2tp_ip.c
+index 3468d5635d0a..9d77a54e8854 100644
+--- a/net/l2tp/l2tp_ip.c
++++ b/net/l2tp/l2tp_ip.c
+@@ -48,7 +48,8 @@ static inline struct l2tp_ip_sock *l2tp_ip_sk(const struct sock *sk)
+       return (struct l2tp_ip_sock *)sk;
+ }
+ 
+-static struct sock *__l2tp_ip_bind_lookup(struct net *net, __be32 laddr, int dif, u32 tunnel_id)
++static struct sock *__l2tp_ip_bind_lookup(const struct net *net, __be32 laddr,
++                                        __be32 raddr, int dif, u32 tunnel_id)
+ {
+       struct sock *sk;
+ 
+@@ -62,6 +63,7 @@ static struct sock *__l2tp_ip_bind_lookup(struct net *net, __be32 laddr, int dif
+               if ((l2tp->conn_id == tunnel_id) &&
+                   net_eq(sock_net(sk), net) &&
+                   !(inet->inet_rcv_saddr && inet->inet_rcv_saddr != laddr) &&
++                  (!inet->inet_daddr || !raddr || inet->inet_daddr == raddr) &&
+                   (!sk->sk_bound_dev_if || !dif ||
+                    sk->sk_bound_dev_if == dif))
+                       goto found;
+@@ -72,15 +74,6 @@ static struct sock *__l2tp_ip_bind_lookup(struct net *net, __be32 laddr, int dif
+       return sk;
+ }
+ 
+-static inline struct sock *l2tp_ip_bind_lookup(struct net *net, __be32 laddr, int dif, u32 tunnel_id)
+-{
+-      struct sock *sk = __l2tp_ip_bind_lookup(net, laddr, dif, tunnel_id);
+-      if (sk)
+-              sock_hold(sk);
+-
+-      return sk;
+-}
+-
+ /* When processing receive frames, there are two cases to
+  * consider. Data frames consist of a non-zero session-id and an
+  * optional cookie. Control frames consist of a regular L2TP header
+@@ -186,8 +179,8 @@ static int l2tp_ip_recv(struct sk_buff *skb)
+               struct iphdr *iph = (struct iphdr *) skb_network_header(skb);
+ 
+               read_lock_bh(&l2tp_ip_lock);
+-              sk = __l2tp_ip_bind_lookup(net, iph->daddr, inet_iif(skb),
+-                                         tunnel_id);
++              sk = __l2tp_ip_bind_lookup(net, iph->daddr, iph->saddr,
++                                         inet_iif(skb), tunnel_id);
+               if (!sk) {
+                       read_unlock_bh(&l2tp_ip_lock);
+                       goto discard;
+@@ -289,7 +282,7 @@ static int l2tp_ip_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len)
+               inet->inet_saddr = 0;  /* Use device */
+ 
+       write_lock_bh(&l2tp_ip_lock);
+-      if (__l2tp_ip_bind_lookup(net, addr->l2tp_addr.s_addr,
++      if (__l2tp_ip_bind_lookup(net, addr->l2tp_addr.s_addr, 0,
+                                 sk->sk_bound_dev_if, addr->l2tp_conn_id)) {
+               write_unlock_bh(&l2tp_ip_lock);
+               ret = -EADDRINUSE;
+diff --git a/net/l2tp/l2tp_ip6.c b/net/l2tp/l2tp_ip6.c
+index 1d522ce833e6..247097289fd0 100644
+--- a/net/l2tp/l2tp_ip6.c
++++ b/net/l2tp/l2tp_ip6.c
+@@ -59,12 +59,14 @@ static inline struct l2tp_ip6_sock *l2tp_ip6_sk(const struct sock *sk)
+ 
+ static struct sock *__l2tp_ip6_bind_lookup(struct net *net,
+                                          struct in6_addr *laddr,
++                                         const struct in6_addr *raddr,
+                                          int dif, u32 tunnel_id)
+ {
+       struct sock *sk;
+ 
+       sk_for_each_bound(sk, &l2tp_ip6_bind_table) {
+               const struct in6_addr *sk_laddr = inet6_rcv_saddr(sk);
++              const struct in6_addr *sk_raddr = &sk->sk_v6_daddr;
+               struct l2tp_ip6_sock *l2tp = l2tp_ip6_sk(sk);
+ 
+               if (l2tp == NULL)
+@@ -73,6 +75,7 @@ static struct sock *__l2tp_ip6_bind_lookup(struct net *net,
+               if ((l2tp->conn_id == tunnel_id) &&
+                   net_eq(sock_net(sk), net) &&
+                   (!sk_laddr || ipv6_addr_any(sk_laddr) || ipv6_addr_equal(sk_laddr, laddr)) &&
++                  (!raddr || ipv6_addr_any(sk_raddr) || ipv6_addr_equal(sk_raddr, raddr)) &&
+                   (!sk->sk_bound_dev_if || !dif ||
+                    sk->sk_bound_dev_if == dif))
+                       goto found;
+@@ -83,17 +86,6 @@ static struct sock *__l2tp_ip6_bind_lookup(struct net *net,
+       return sk;
+ }
+ 
+-static inline struct sock *l2tp_ip6_bind_lookup(struct net *net,
+-                                              struct in6_addr *laddr,
+-                                              int dif, u32 tunnel_id)
+-{
+-      struct sock *sk = __l2tp_ip6_bind_lookup(net, laddr, dif, tunnel_id);
+-      if (sk)
+-              sock_hold(sk);
+-
+-      return sk;
+-}
+-
+ /* When processing receive frames, there are two cases to
+  * consider. Data frames consist of a non-zero session-id and an
+  * optional cookie. Control frames consist of a regular L2TP header
+@@ -200,8 +192,8 @@ static int l2tp_ip6_recv(struct sk_buff *skb)
+               struct ipv6hdr *iph = ipv6_hdr(skb);
+ 
+               read_lock_bh(&l2tp_ip6_lock);
+-              sk = __l2tp_ip6_bind_lookup(net, &iph->daddr, inet6_iif(skb),
+-                                          tunnel_id);
++              sk = __l2tp_ip6_bind_lookup(net, &iph->daddr, &iph->saddr,
++                                          inet6_iif(skb), tunnel_id);
+               if (!sk) {
+                       read_unlock_bh(&l2tp_ip6_lock);
+                       goto discard;
+@@ -339,7 +331,7 @@ static int l2tp_ip6_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len)
+       rcu_read_unlock();
+ 
+       write_lock_bh(&l2tp_ip6_lock);
+-      if (__l2tp_ip6_bind_lookup(net, &addr->l2tp_addr, bound_dev_if,
++      if (__l2tp_ip6_bind_lookup(net, &addr->l2tp_addr, NULL, bound_dev_if,
+                                  addr->l2tp_conn_id)) {
+               write_unlock_bh(&l2tp_ip6_lock);
+               err = -EADDRINUSE;
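Both l2tp lookups above gain an optional remote-address filter: a connected socket now matches only packets from its recorded peer, while an unconnected socket, or a lookup that passes no remote address (as bind() does with 0/NULL), skips the check. The predicate in isolation, as a small C sketch with hypothetical types and IPv4 addresses reduced to integers:

#include <stdio.h>
#include <stdint.h>

struct fake_sock {
        uint32_t bound_laddr;  /* 0 = wildcard local address */
        uint32_t peer_raddr;   /* 0 = unconnected socket */
};

/* The match rule the patch adds to __l2tp_ip_bind_lookup(): a socket
 * with a recorded peer only matches packets from that peer; raddr == 0
 * (the bind-time lookup) skips the remote check entirely. */
static int lookup_match(const struct fake_sock *sk,
                        uint32_t laddr, uint32_t raddr)
{
        if (sk->bound_laddr && sk->bound_laddr != laddr)
                return 0;
        if (sk->peer_raddr && raddr && sk->peer_raddr != raddr)
                return 0;
        return 1;
}

int main(void)
{
        struct fake_sock connected = { .bound_laddr = 1, .peer_raddr = 9 };
        printf("%d\n", lookup_match(&connected, 1, 9)); /* 1: peer matches   */
        printf("%d\n", lookup_match(&connected, 1, 7)); /* 0: wrong peer     */
        printf("%d\n", lookup_match(&connected, 1, 0)); /* 1: bind-time case */
        return 0;
}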
+diff --git a/net/mac80211/chan.c b/net/mac80211/chan.c
+index e75cbf6ecc26..a0d901d8992e 100644
+--- a/net/mac80211/chan.c
++++ b/net/mac80211/chan.c
+@@ -231,9 +231,6 @@ ieee80211_get_max_required_bw(struct ieee80211_sub_if_data *sdata)
+                   !(sta->sdata->bss && sta->sdata->bss == sdata->bss))
+                       continue;
+ 
+-              if (!sta->uploaded || !test_sta_flag(sta, WLAN_STA_ASSOC))
+-                      continue;
+-
+               max_bw = max(max_bw, ieee80211_get_sta_bw(&sta->sta));
+       }
+       rcu_read_unlock();
+diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c
+index 274c564bd9af..1ffd1e145c13 100644
+--- a/net/mac80211/tx.c
++++ b/net/mac80211/tx.c
+@@ -1244,7 +1244,7 @@ ieee80211_tx_prepare(struct ieee80211_sub_if_data *sdata,
+ 
+ static struct txq_info *ieee80211_get_txq(struct ieee80211_local *local,
+                                         struct ieee80211_vif *vif,
+-                                        struct ieee80211_sta *pubsta,
++                                        struct sta_info *sta,
+                                         struct sk_buff *skb)
+ {
+       struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
+@@ -1258,10 +1258,13 @@ static struct txq_info *ieee80211_get_txq(struct ieee80211_local *local,
+       if (!ieee80211_is_data(hdr->frame_control))
+               return NULL;
+ 
+-      if (pubsta) {
++      if (sta) {
+               u8 tid = skb->priority & IEEE80211_QOS_CTL_TID_MASK;
+ 
+-              txq = pubsta->txq[tid];
++              if (!sta->uploaded)
++                      return NULL;
++
++              txq = sta->sta.txq[tid];
+       } else if (vif) {
+               txq = vif->txq;
+       }
+@@ -1499,23 +1502,17 @@ static bool ieee80211_queue_skb(struct ieee80211_local *local,
+       struct fq *fq = &local->fq;
+       struct ieee80211_vif *vif;
+       struct txq_info *txqi;
+-      struct ieee80211_sta *pubsta;
+ 
+       if (!local->ops->wake_tx_queue ||
+           sdata->vif.type == NL80211_IFTYPE_MONITOR)
+               return false;
+ 
+-      if (sta && sta->uploaded)
+-              pubsta = &sta->sta;
+-      else
+-              pubsta = NULL;
+-
+       if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN)
+               sdata = container_of(sdata->bss,
+                                    struct ieee80211_sub_if_data, u.ap);
+ 
+       vif = &sdata->vif;
+-      txqi = ieee80211_get_txq(local, vif, pubsta, skb);
++      txqi = ieee80211_get_txq(local, vif, sta, skb);
+ 
+       if (!txqi)
+               return false;
+diff --git a/net/qrtr/qrtr.c b/net/qrtr/qrtr.c
+index c985ecbe9bd6..ae5ac175b2be 100644
+--- a/net/qrtr/qrtr.c
++++ b/net/qrtr/qrtr.c
+@@ -252,7 +252,7 @@ static struct sk_buff *qrtr_alloc_resume_tx(u32 src_node,
+       const int pkt_len = 20;
+       struct qrtr_hdr *hdr;
+       struct sk_buff *skb;
+-      u32 *buf;
++      __le32 *buf;
+ 
+       skb = alloc_skb(QRTR_HDR_SIZE + pkt_len, GFP_KERNEL);
+       if (!skb)
+@@ -269,7 +269,7 @@ static struct sk_buff *qrtr_alloc_resume_tx(u32 src_node,
+       hdr->dst_node_id = cpu_to_le32(dst_node);
+       hdr->dst_port_id = cpu_to_le32(QRTR_PORT_CTRL);
+ 
+-      buf = (u32 *)skb_put(skb, pkt_len);
++      buf = (__le32 *)skb_put(skb, pkt_len);
+       memset(buf, 0, pkt_len);
+       buf[0] = cpu_to_le32(QRTR_TYPE_RESUME_TX);
+       buf[1] = cpu_to_le32(src_node);
+diff --git a/net/sctp/debug.c b/net/sctp/debug.c
+index 95d7b15dad21..e371a0d90068 100644
+--- a/net/sctp/debug.c
++++ b/net/sctp/debug.c
+@@ -166,7 +166,7 @@ static const char *const sctp_timer_tbl[] = {
+ /* Lookup timer debug name. */
+ const char *sctp_tname(const sctp_subtype_t id)
+ {
+-      if (id.timeout <= SCTP_EVENT_TIMEOUT_MAX)
++      if (id.timeout < ARRAY_SIZE(sctp_timer_tbl))
+               return sctp_timer_tbl[id.timeout];
+       return "unknown_timer";
+ }
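The sctp one-liner above replaces a `<=` test against a separately maintained *_MAX constant with a bound taken from the table itself via ARRAY_SIZE(), so the lookup can never overrun even if the enum and the table drift apart. The same guard in a standalone C sketch:

#include <stdio.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

static const char *const timer_names[] = {
        "none", "t1_cookie", "t1_init", "t2_shutdown",
};

/* Bounds-check against the table itself, as the patch does, instead of
 * against a hand-kept maximum that can go stale when entries change. */
static const char *timer_name(unsigned int id)
{
        if (id < ARRAY_SIZE(timer_names))
                return timer_names[id];
        return "unknown_timer";
}

int main(void)
{
        printf("%s\n", timer_name(1));  /* t1_cookie     */
        printf("%s\n", timer_name(42)); /* unknown_timer */
        return 0;
}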
+diff --git a/net/tipc/server.c b/net/tipc/server.c
+index f89c0c2e8c16..3cd6402e812c 100644
+--- a/net/tipc/server.c
++++ b/net/tipc/server.c
+@@ -86,7 +86,6 @@ struct outqueue_entry {
+ static void tipc_recv_work(struct work_struct *work);
+ static void tipc_send_work(struct work_struct *work);
+ static void tipc_clean_outqueues(struct tipc_conn *con);
+-static void tipc_sock_release(struct tipc_conn *con);
+ 
+ static void tipc_conn_kref_release(struct kref *kref)
+ {
+@@ -104,7 +103,6 @@ static void tipc_conn_kref_release(struct kref *kref)
+               }
+               saddr->scope = -TIPC_NODE_SCOPE;
+               kernel_bind(sock, (struct sockaddr *)saddr, sizeof(*saddr));
+-              tipc_sock_release(con);
+               sock_release(sock);
+               con->sock = NULL;
+ 
+@@ -194,19 +192,15 @@ static void tipc_unregister_callbacks(struct tipc_conn *con)
+       write_unlock_bh(&sk->sk_callback_lock);
+ }
+ 
+-static void tipc_sock_release(struct tipc_conn *con)
++static void tipc_close_conn(struct tipc_conn *con)
+ {
+       struct tipc_server *s = con->server;
+ 
+-      if (con->conid)
+-              s->tipc_conn_release(con->conid, con->usr_data);
+-
+-      tipc_unregister_callbacks(con);
+-}
+-
+-static void tipc_close_conn(struct tipc_conn *con)
+-{
+       if (test_and_clear_bit(CF_CONNECTED, &con->flags)) {
++              tipc_unregister_callbacks(con);
++
++              if (con->conid)
++                      s->tipc_conn_release(con->conid, con->usr_data);
+ 
+               /* We shouldn't flush pending works as we may be in the
+                * thread. In fact the races with pending rx/tx work structs
+@@ -625,14 +619,12 @@ int tipc_server_start(struct tipc_server *s)
+ void tipc_server_stop(struct tipc_server *s)
+ {
+       struct tipc_conn *con;
+-      int total = 0;
+       int id;
+ 
+       spin_lock_bh(&s->idr_lock);
+-      for (id = 0; total < s->idr_in_use; id++) {
++      for (id = 0; s->idr_in_use; id++) {
+               con = idr_find(&s->conn_idr, id);
+               if (con) {
+-                      total++;
+                       spin_unlock_bh(&s->idr_lock);
+                       tipc_close_conn(con);
+                       spin_lock_bh(&s->idr_lock);
+diff --git a/security/integrity/ima/ima_main.c b/security/integrity/ima/ima_main.c
+index 0e8762945e79..2b3def14b4fb 100644
+--- a/security/integrity/ima/ima_main.c
++++ b/security/integrity/ima/ima_main.c
+@@ -51,6 +51,8 @@ static int __init hash_setup(char *str)
+                       ima_hash_algo = HASH_ALGO_SHA1;
+               else if (strncmp(str, "md5", 3) == 0)
+                       ima_hash_algo = HASH_ALGO_MD5;
++              else
++                      return 1;
+               goto out;
+       }
+ 
+@@ -60,6 +62,8 @@ static int __init hash_setup(char *str)
+                       break;
+               }
+       }
++      if (i == HASH_ALGO__LAST)
++              return 1;
+ out:
+       hash_setup_done = 1;
+       return 1;
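With the two hunks above, hash_setup() no longer silently accepts an unknown algorithm name; it bails out and leaves the compiled-in default in place. A compact userspace sketch of the stricter parse (hypothetical names; the sketch also mirrors the kernel convention that __setup() handlers return 1 whether or not they liked the value):

#include <stdio.h>
#include <string.h>

static const char *const algos[] = { "sha1", "md5", "sha256" };
static int chosen = -1; /* -1 = keep the built-in default */

/* Patched behaviour: match against the known table and return early,
 * leaving the default untouched, when nothing matches. */
static int fake_hash_setup(const char *str)
{
        for (size_t i = 0; i < sizeof(algos) / sizeof(algos[0]); i++) {
                if (strcmp(str, algos[i]) == 0) {
                        chosen = (int)i;
                        break;
                }
        }
        return 1; /* __setup() handlers report "consumed" either way */
}

int main(void)
{
        fake_hash_setup("sha999"); /* unknown: chosen stays -1 */
        printf("chosen=%d\n", chosen);
        fake_hash_setup("sha256");
        printf("chosen=%d\n", chosen);
        return 0;
}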
+diff --git a/tools/include/linux/poison.h b/tools/include/linux/poison.h
+index 51334edec506..f306a7642509 100644
+--- a/tools/include/linux/poison.h
++++ b/tools/include/linux/poison.h
+@@ -14,6 +14,10 @@
+ # define POISON_POINTER_DELTA 0
+ #endif
+ 
++#ifdef __cplusplus
++#define LIST_POISON1  NULL
++#define LIST_POISON2  NULL
++#else
+ /*
+  * These are non-NULL pointers that will result in page faults
+  * under normal circumstances, used to verify that nobody uses
+@@ -21,6 +25,7 @@
+  */
+ #define LIST_POISON1  ((void *) 0x100 + POISON_POINTER_DELTA)
+ #define LIST_POISON2  ((void *) 0x200 + POISON_POINTER_DELTA)
++#endif
+ 
+ /********** include/linux/timer.h **********/
+ /*
+diff --git a/tools/perf/tests/attr.c b/tools/perf/tests/attr.c
+index 28d1605b0338..b60a6fd66517 100644
+--- a/tools/perf/tests/attr.c
++++ b/tools/perf/tests/attr.c
+@@ -150,7 +150,7 @@ static int run_dir(const char *d, const char *perf)
+       snprintf(cmd, 3*PATH_MAX, PYTHON " %s/attr.py -d %s/attr/ -p %s %.*s",
+                d, d, perf, vcnt, v);
+ 
+-      return system(cmd);
++      return system(cmd) ? TEST_FAIL : TEST_OK;
+ }
+ 
+ int test__attr(int subtest __maybe_unused)
+diff --git a/tools/testing/selftests/x86/ldt_gdt.c b/tools/testing/selftests/x86/ldt_gdt.c
+index e717fed80219..f936a3cd3e35 100644
+--- a/tools/testing/selftests/x86/ldt_gdt.c
++++ b/tools/testing/selftests/x86/ldt_gdt.c
+@@ -360,9 +360,24 @@ static void do_simple_tests(void)
+       install_invalid(&desc, false);
+ 
+       desc.seg_not_present = 0;
+-      desc.read_exec_only = 0;
+       desc.seg_32bit = 1;
++      desc.read_exec_only = 0;
++      desc.limit = 0xfffff;
++
+       install_valid(&desc, AR_DPL3 | AR_TYPE_RWDATA | AR_S | AR_P | AR_DB);
++
++      desc.limit_in_pages = 1;
++
++      install_valid(&desc, AR_DPL3 | AR_TYPE_RWDATA | AR_S | AR_P | AR_DB | AR_G);
++      desc.read_exec_only = 1;
++      install_valid(&desc, AR_DPL3 | AR_TYPE_RODATA | AR_S | AR_P | AR_DB | AR_G);
++      desc.contents = 1;
++      desc.read_exec_only = 0;
++      install_valid(&desc, AR_DPL3 | AR_TYPE_RWDATA_EXPDOWN | AR_S | AR_P | AR_DB | AR_G);
++      desc.read_exec_only = 1;
++      install_valid(&desc, AR_DPL3 | AR_TYPE_RODATA_EXPDOWN | AR_S | AR_P | AR_DB | AR_G);
++
++      desc.limit = 0;
+       install_invalid(&desc, true);
+ }
+ 
+diff --git a/tools/usb/usbip/Makefile.am b/tools/usb/usbip/Makefile.am
+index 66f8bf038c9f..45eaa70a71e0 100644
+--- a/tools/usb/usbip/Makefile.am
++++ b/tools/usb/usbip/Makefile.am
+@@ -1,6 +1,7 @@
+ SUBDIRS := libsrc src
+ includedir = @includedir@/usbip
+ include_HEADERS := $(addprefix libsrc/, \
+-                   usbip_common.h vhci_driver.h usbip_host_driver.h)
++                   usbip_common.h vhci_driver.h usbip_host_driver.h \
++                   list.h sysfs_utils.h usbip_host_common.h)
+ 
+ dist_man_MANS := $(addprefix doc/, usbip.8 usbipd.8)
+diff --git a/virt/kvm/arm/arch_timer.c b/virt/kvm/arm/arch_timer.c
+index 27a1f6341d41..7b49a1378c90 100644
+--- a/virt/kvm/arm/arch_timer.c
++++ b/virt/kvm/arm/arch_timer.c
+@@ -89,9 +89,6 @@ static void kvm_timer_inject_irq_work(struct work_struct *work)
+       struct kvm_vcpu *vcpu;
+ 
+       vcpu = container_of(work, struct kvm_vcpu, arch.timer_cpu.expired);
+-      vcpu->arch.timer_cpu.armed = false;
+-
+-      WARN_ON(!kvm_timer_should_fire(vcpu));
+ 
+       /*
+        * If the vcpu is blocked we want to wake it up so that it will see
