commit:     e3116a6b779f31a17eda36f5b72b3b0cfb8bbd47
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Fri Dec 21 14:44:02 2018 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Fri Dec 21 14:44:02 2018 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=e3116a6b

proj/linux-patches: Linux patch 4.9.147

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

 0000_README              |    4 +
 1146_linux-4.9.147.patch | 2228 ++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 2232 insertions(+)

diff --git a/0000_README b/0000_README
index 4c5f483..45d73b5 100644
--- a/0000_README
+++ b/0000_README
@@ -627,6 +627,10 @@ Patch:  1145_linux-4.9.146.patch
 From:   http://www.kernel.org
 Desc:   Linux 4.9.146
 
+Patch:  1146_linux-4.9.147.patch
+From:   http://www.kernel.org
+Desc:   Linux 4.9.147
+
 Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.

diff --git a/1146_linux-4.9.147.patch b/1146_linux-4.9.147.patch
new file mode 100644
index 0000000..720a69b
--- /dev/null
+++ b/1146_linux-4.9.147.patch
@@ -0,0 +1,2228 @@
+diff --git a/Makefile b/Makefile
+index 0a150d2b3353..3cccc51a57ce 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,6 +1,6 @@
+ VERSION = 4
+ PATCHLEVEL = 9
+-SUBLEVEL = 146
++SUBLEVEL = 147
+ EXTRAVERSION =
+ NAME = Roaring Lionus
+ 
+diff --git a/arch/arc/include/asm/io.h b/arch/arc/include/asm/io.h
+index c22b181e8206..2f39d9b3886e 100644
+--- a/arch/arc/include/asm/io.h
++++ b/arch/arc/include/asm/io.h
+@@ -12,6 +12,7 @@
+ #include <linux/types.h>
+ #include <asm/byteorder.h>
+ #include <asm/page.h>
++#include <asm/unaligned.h>
+ 
+ #ifdef CONFIG_ISA_ARCV2
+ #include <asm/barrier.h>
+@@ -94,6 +95,42 @@ static inline u32 __raw_readl(const volatile void __iomem *addr)
+       return w;
+ }
+ 
++/*
++ * {read,write}s{b,w,l}() repeatedly access the same IO address in
++ * native endianness in 8-, 16-, 32-bit chunks {into,from} memory,
++ * @count times
++ */
++#define __raw_readsx(t,f) \
++static inline void __raw_reads##f(const volatile void __iomem *addr,  \
++                                void *ptr, unsigned int count)        \
++{                                                                     \
++      bool is_aligned = ((unsigned long)ptr % ((t) / 8)) == 0;        \
++      u##t *buf = ptr;                                                \
++                                                                      \
++      if (!count)                                                     \
++              return;                                                 \
++                                                                      \
++      /* Some ARC CPU's don't support unaligned accesses */           \
++      if (is_aligned) {                                               \
++              do {                                                    \
++                      u##t x = __raw_read##f(addr);                   \
++                      *buf++ = x;                                     \
++              } while (--count);                                      \
++      } else {                                                        \
++              do {                                                    \
++                      u##t x = __raw_read##f(addr);                   \
++                      put_unaligned(x, buf++);                        \
++              } while (--count);                                      \
++      }                                                               \
++}
++
++#define __raw_readsb __raw_readsb
++__raw_readsx(8, b)
++#define __raw_readsw __raw_readsw
++__raw_readsx(16, w)
++#define __raw_readsl __raw_readsl
++__raw_readsx(32, l)
++
+ #define __raw_writeb __raw_writeb
+ static inline void __raw_writeb(u8 b, volatile void __iomem *addr)
+ {
+@@ -126,6 +163,35 @@ static inline void __raw_writel(u32 w, volatile void __iomem *addr)
+ 
+ }
+ 
++#define __raw_writesx(t,f)                                            \
++static inline void __raw_writes##f(volatile void __iomem *addr,       \
++                                 const void *ptr, unsigned int count) \
++{                                                                     \
++      bool is_aligned = ((unsigned long)ptr % ((t) / 8)) == 0;        \
++      const u##t *buf = ptr;                                          \
++                                                                      \
++      if (!count)                                                     \
++              return;                                                 \
++                                                                      \
++      /* Some ARC CPU's don't support unaligned accesses */           \
++      if (is_aligned) {                                               \
++              do {                                                    \
++                      __raw_write##f(*buf++, addr);                   \
++              } while (--count);                                      \
++      } else {                                                        \
++              do {                                                    \
++                      __raw_write##f(get_unaligned(buf++), addr);     \
++              } while (--count);                                      \
++      }                                                               \
++}
++
++#define __raw_writesb __raw_writesb
++__raw_writesx(8, b)
++#define __raw_writesw __raw_writesw
++__raw_writesx(16, w)
++#define __raw_writesl __raw_writesl
++__raw_writesx(32, l)
++
+ /*
+  * MMIO can also get buffered/optimized in micro-arch, so barriers needed
+  * Based on ARM model for the typical use case
+@@ -141,10 +207,16 @@ static inline void __raw_writel(u32 w, volatile void __iomem *addr)
+ #define readb(c)              ({ u8  __v = readb_relaxed(c); __iormb(); __v; })
+ #define readw(c)              ({ u16 __v = readw_relaxed(c); __iormb(); __v; })
+ #define readl(c)              ({ u32 __v = readl_relaxed(c); __iormb(); __v; })
++#define readsb(p,d,l)         ({ __raw_readsb(p,d,l); __iormb(); })
++#define readsw(p,d,l)         ({ __raw_readsw(p,d,l); __iormb(); })
++#define readsl(p,d,l)         ({ __raw_readsl(p,d,l); __iormb(); })
+ 
+ #define writeb(v,c)           ({ __iowmb(); writeb_relaxed(v,c); })
+ #define writew(v,c)           ({ __iowmb(); writew_relaxed(v,c); })
+ #define writel(v,c)           ({ __iowmb(); writel_relaxed(v,c); })
++#define writesb(p,d,l)                ({ __iowmb(); __raw_writesb(p,d,l); })
++#define writesw(p,d,l)                ({ __iowmb(); __raw_writesw(p,d,l); })
++#define writesl(p,d,l)                ({ __iowmb(); __raw_writesl(p,d,l); })
+ 
+ /*
+  * Relaxed API for drivers which can handle barrier ordering themselves
+diff --git a/arch/arm/mach-mmp/cputype.h b/arch/arm/mach-mmp/cputype.h
+index 8a3b56dfd35d..8f94addd9bce 100644
+--- a/arch/arm/mach-mmp/cputype.h
++++ b/arch/arm/mach-mmp/cputype.h
+@@ -43,10 +43,12 @@ static inline int cpu_is_pxa910(void)
+ #define cpu_is_pxa910()       (0)
+ #endif
+ 
+-#ifdef CONFIG_CPU_MMP2
++#if defined(CONFIG_CPU_MMP2) || defined(CONFIG_MACH_MMP2_DT)
+ static inline int cpu_is_mmp2(void)
+ {
+-      return (((read_cpuid_id() >> 8) & 0xff) == 0x58);
++      return (((read_cpuid_id() >> 8) & 0xff) == 0x58) &&
++              (((mmp_chip_id & 0xfff) == 0x410) ||
++               ((mmp_chip_id & 0xfff) == 0x610));
+ }
+ #else
+ #define cpu_is_mmp2() (0)
+diff --git a/arch/arm/mm/cache-v7.S b/arch/arm/mm/cache-v7.S
+index a134d8a13d00..11d699af30ed 100644
+--- a/arch/arm/mm/cache-v7.S
++++ b/arch/arm/mm/cache-v7.S
+@@ -359,14 +359,16 @@ v7_dma_inv_range:
+       ALT_UP(W(nop))
+ #endif
+       mcrne   p15, 0, r0, c7, c14, 1          @ clean & invalidate D / U line
++      addne   r0, r0, r2
+ 
+       tst     r1, r3
+       bic     r1, r1, r3
+       mcrne   p15, 0, r1, c7, c14, 1          @ clean & invalidate D / U line
+-1:
+-      mcr     p15, 0, r0, c7, c6, 1           @ invalidate D / U line
+-      add     r0, r0, r2
+       cmp     r0, r1
++1:
++      mcrlo   p15, 0, r0, c7, c6, 1           @ invalidate D / U line
++      addlo   r0, r0, r2
++      cmplo   r0, r1
+       blo     1b
+       dsb     st
+       ret     lr
+diff --git a/arch/arm/mm/cache-v7m.S b/arch/arm/mm/cache-v7m.S
+index 816a7e44e6f1..d29927740a19 100644
+--- a/arch/arm/mm/cache-v7m.S
++++ b/arch/arm/mm/cache-v7m.S
+@@ -73,9 +73,11 @@
+ /*
+  * dcimvac: Invalidate data cache line by MVA to PoC
+  */
+-.macro dcimvac, rt, tmp
+-      v7m_cacheop \rt, \tmp, V7M_SCB_DCIMVAC
++.irp    c,,eq,ne,cs,cc,mi,pl,vs,vc,hi,ls,ge,lt,gt,le,hs,lo
++.macro dcimvac\c, rt, tmp
++      v7m_cacheop \rt, \tmp, V7M_SCB_DCIMVAC, \c
+ .endm
++.endr
+ 
+ /*
+  * dccmvau: Clean data cache line by MVA to PoU
+@@ -369,14 +371,16 @@ v7m_dma_inv_range:
+       tst     r0, r3
+       bic     r0, r0, r3
+       dccimvacne r0, r3
++      addne   r0, r0, r2
+       subne   r3, r2, #1      @ restore r3, corrupted by v7m's dccimvac
+       tst     r1, r3
+       bic     r1, r1, r3
+       dccimvacne r1, r3
+-1:
+-      dcimvac r0, r3
+-      add     r0, r0, r2
+       cmp     r0, r1
++1:
++      dcimvaclo r0, r3
++      addlo   r0, r0, r2
++      cmplo   r0, r1
+       blo     1b
+       dsb     st
+       ret     lr
+diff --git a/arch/powerpc/kernel/msi.c b/arch/powerpc/kernel/msi.c
+index dab616a33b8d..f2197654be07 100644
+--- a/arch/powerpc/kernel/msi.c
++++ b/arch/powerpc/kernel/msi.c
+@@ -34,5 +34,10 @@ void arch_teardown_msi_irqs(struct pci_dev *dev)
+ {
+       struct pci_controller *phb = pci_bus_to_host(dev->bus);
+ 
+-      phb->controller_ops.teardown_msi_irqs(dev);
++      /*
++       * We can be called even when arch_setup_msi_irqs() returns -ENOSYS,
++       * so check the pointer again.
++       */
++      if (phb->controller_ops.teardown_msi_irqs)
++              phb->controller_ops.teardown_msi_irqs(dev);
+ }
+diff --git a/arch/x86/include/asm/qspinlock.h b/arch/x86/include/asm/qspinlock.h
+index eaba08076030..9e78e963afb8 100644
+--- a/arch/x86/include/asm/qspinlock.h
++++ b/arch/x86/include/asm/qspinlock.h
+@@ -4,6 +4,29 @@
+ #include <asm/cpufeature.h>
+ #include <asm-generic/qspinlock_types.h>
+ #include <asm/paravirt.h>
++#include <asm/rmwcc.h>
++
++#define _Q_PENDING_LOOPS      (1 << 9)
++
++#define queued_fetch_set_pending_acquire queued_fetch_set_pending_acquire
++
++static __always_inline bool __queued_RMW_btsl(struct qspinlock *lock)
++{
++      GEN_BINARY_RMWcc(LOCK_PREFIX "btsl", lock->val.counter,
++                       "I", _Q_PENDING_OFFSET, "%0", c);
++}
++
++static __always_inline u32 queued_fetch_set_pending_acquire(struct qspinlock *lock)
++{
++      u32 val = 0;
++
++      if (__queued_RMW_btsl(lock))
++              val |= _Q_PENDING_VAL;
++
++      val |= atomic_read(&lock->val) & ~_Q_PENDING_MASK;
++
++      return val;
++}
+ 
+ #define       queued_spin_unlock queued_spin_unlock
+ /**
+@@ -14,7 +37,7 @@
+  */
+ static inline void native_queued_spin_unlock(struct qspinlock *lock)
+ {
+-      smp_store_release((u8 *)lock, 0);
++      smp_store_release(&lock->locked, 0);
+ }
+ 
+ #ifdef CONFIG_PARAVIRT_SPINLOCKS
+diff --git a/arch/x86/include/asm/qspinlock_paravirt.h b/arch/x86/include/asm/qspinlock_paravirt.h
+index 9d55f9b6e167..fc75415ae971 100644
+--- a/arch/x86/include/asm/qspinlock_paravirt.h
++++ b/arch/x86/include/asm/qspinlock_paravirt.h
+@@ -21,8 +21,7 @@ PV_CALLEE_SAVE_REGS_THUNK(__pv_queued_spin_unlock_slowpath);
+  *
+  * void __pv_queued_spin_unlock(struct qspinlock *lock)
+  * {
+- *    struct __qspinlock *l = (void *)lock;
+- *    u8 lockval = cmpxchg(&l->locked, _Q_LOCKED_VAL, 0);
++ *    u8 lockval = cmpxchg(&lock->locked, _Q_LOCKED_VAL, 0);
+  *
+  *    if (likely(lockval == _Q_LOCKED_VAL))
+  *            return;
+diff --git a/arch/x86/platform/efi/early_printk.c b/arch/x86/platform/efi/early_printk.c
+index 5fdacb322ceb..c3e6be110b7d 100644
+--- a/arch/x86/platform/efi/early_printk.c
++++ b/arch/x86/platform/efi/early_printk.c
+@@ -179,7 +179,7 @@ early_efi_write(struct console *con, const char *str, unsigned int num)
+                       num--;
+               }
+ 
+-              if (efi_x >= si->lfb_width) {
++              if (efi_x + font->width > si->lfb_width) {
+                       efi_x = 0;
+                       efi_y += font->height;
+               }
+diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
+index a166359ad5d4..35be49f5791d 100644
+--- a/drivers/ata/libata-core.c
++++ b/drivers/ata/libata-core.c
+@@ -4476,6 +4476,7 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
+       { "SSD*INTEL*",                 NULL,   ATA_HORKAGE_ZERO_AFTER_TRIM, },
+       { "Samsung*SSD*",               NULL,   ATA_HORKAGE_ZERO_AFTER_TRIM, },
+       { "SAMSUNG*SSD*",               NULL,   ATA_HORKAGE_ZERO_AFTER_TRIM, },
++      { "SAMSUNG*MZ7KM*",             NULL,   ATA_HORKAGE_ZERO_AFTER_TRIM, },
+       { "ST[1248][0248]0[FH]*",       NULL,   ATA_HORKAGE_ZERO_AFTER_TRIM, },
+ 
+       /*
+diff --git a/drivers/clk/mmp/clk.c b/drivers/clk/mmp/clk.c
+index 61893fe73251..18b6c9b55b95 100644
+--- a/drivers/clk/mmp/clk.c
++++ b/drivers/clk/mmp/clk.c
+@@ -182,7 +182,7 @@ void mmp_clk_add(struct mmp_clk_unit *unit, unsigned int id,
+               pr_err("CLK %d has invalid pointer %p\n", id, clk);
+               return;
+       }
+-      if (id > unit->nr_clks) {
++      if (id >= unit->nr_clks) {
+               pr_err("CLK %d is invalid\n", id);
+               return;
+       }
+diff --git a/drivers/clk/mvebu/cp110-system-controller.c b/drivers/clk/mvebu/cp110-system-controller.c
+index f2303da7fda7..465953c75320 100644
+--- a/drivers/clk/mvebu/cp110-system-controller.c
++++ b/drivers/clk/mvebu/cp110-system-controller.c
+@@ -172,11 +172,11 @@ static struct clk *cp110_of_clk_get(struct of_phandle_args *clkspec, void *data)
+       unsigned int idx = clkspec->args[1];
+ 
+       if (type == CP110_CLK_TYPE_CORE) {
+-              if (idx > CP110_MAX_CORE_CLOCKS)
++              if (idx >= CP110_MAX_CORE_CLOCKS)
+                       return ERR_PTR(-EINVAL);
+               return clk_data->clks[idx];
+       } else if (type == CP110_CLK_TYPE_GATABLE) {
+-              if (idx > CP110_MAX_GATABLE_CLOCKS)
++              if (idx >= CP110_MAX_GATABLE_CLOCKS)
+                       return ERR_PTR(-EINVAL);
+               return clk_data->clks[CP110_MAX_CORE_CLOCKS + idx];
+       }
+diff --git a/drivers/gpu/drm/ast/ast_fb.c b/drivers/gpu/drm/ast/ast_fb.c
+index 7a86e24e2687..5e0d3e561b04 100644
+--- a/drivers/gpu/drm/ast/ast_fb.c
++++ b/drivers/gpu/drm/ast/ast_fb.c
+@@ -286,6 +286,7 @@ static void ast_fbdev_destroy(struct drm_device *dev,
+ {
+       struct ast_framebuffer *afb = &afbdev->afb;
+ 
++      drm_crtc_force_disable_all(dev);
+       drm_fb_helper_unregister_fbi(&afbdev->helper);
+       drm_fb_helper_release_fbi(&afbdev->helper);
+ 
+diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
+index fd11be6b23b9..62bcc770a181 100644
+--- a/drivers/gpu/drm/i915/intel_lrc.c
++++ b/drivers/gpu/drm/i915/intel_lrc.c
+@@ -386,8 +386,13 @@ static u64 execlists_update_context(struct drm_i915_gem_request *rq)
+        * may not be visible to the HW prior to the completion of the UC
+        * register write and that we may begin execution from the context
+        * before its image is complete leading to invalid PD chasing.
++       *
++       * Furthermore, Braswell, at least, wants a full mb to be sure that
++       * the writes are coherent in memory (visible to the GPU) prior to
++       * execution, and not just visible to other CPUs (as is the result of
++       * wmb).
+        */
+-      wmb();
++      mb();
+       return ce->lrc_desc;
+ }
+ 
+diff --git a/drivers/gpu/drm/msm/msm_atomic.c b/drivers/gpu/drm/msm/msm_atomic.c
+index 73bae382eac3..5c58a98f67c0 100644
+--- a/drivers/gpu/drm/msm/msm_atomic.c
++++ b/drivers/gpu/drm/msm/msm_atomic.c
+@@ -98,7 +98,12 @@ static void msm_atomic_wait_for_commit_done(struct drm_device *dev,
+               if (old_state->legacy_cursor_update)
+                       continue;
+ 
++              if (drm_crtc_vblank_get(crtc))
++                      continue;
++
+               kms->funcs->wait_for_crtc_commit_done(kms, crtc);
++
++              drm_crtc_vblank_put(crtc);
+       }
+ }
+ 
+diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_drv.c b/drivers/gpu/drm/rockchip/rockchip_drm_drv.c
+index f2033ab36f37..8c8cbe837e61 100644
+--- a/drivers/gpu/drm/rockchip/rockchip_drm_drv.c
++++ b/drivers/gpu/drm/rockchip/rockchip_drm_drv.c
+@@ -478,11 +478,6 @@ static int rockchip_drm_platform_remove(struct platform_device *pdev)
+       return 0;
+ }
+ 
+-static void rockchip_drm_platform_shutdown(struct platform_device *pdev)
+-{
+-      rockchip_drm_platform_remove(pdev);
+-}
+-
+ static const struct of_device_id rockchip_drm_dt_ids[] = {
+       { .compatible = "rockchip,display-subsystem", },
+       { /* sentinel */ },
+@@ -492,7 +487,6 @@ MODULE_DEVICE_TABLE(of, rockchip_drm_dt_ids);
+ static struct platform_driver rockchip_drm_platform_driver = {
+       .probe = rockchip_drm_platform_probe,
+       .remove = rockchip_drm_platform_remove,
+-      .shutdown = rockchip_drm_platform_shutdown,
+       .driver = {
+               .name = "rockchip-drm",
+               .of_match_table = rockchip_drm_dt_ids,
+diff --git a/drivers/i2c/busses/i2c-axxia.c b/drivers/i2c/busses/i2c-axxia.c
+index 4351a9343058..96a6d5df9b26 100644
+--- a/drivers/i2c/busses/i2c-axxia.c
++++ b/drivers/i2c/busses/i2c-axxia.c
+@@ -74,8 +74,7 @@
+                                MST_STATUS_ND)
+ #define   MST_STATUS_ERR      (MST_STATUS_NAK | \
+                                MST_STATUS_AL  | \
+-                               MST_STATUS_IP  | \
+-                               MST_STATUS_TSS)
++                               MST_STATUS_IP)
+ #define MST_TX_BYTES_XFRD     0x50
+ #define MST_RX_BYTES_XFRD     0x54
+ #define SCL_HIGH_PERIOD               0x80
+@@ -241,7 +240,7 @@ static int axxia_i2c_empty_rx_fifo(struct axxia_i2c_dev *idev)
+                        */
+                       if (c <= 0 || c > I2C_SMBUS_BLOCK_MAX) {
+                               idev->msg_err = -EPROTO;
+-                              i2c_int_disable(idev, ~0);
++                              i2c_int_disable(idev, ~MST_STATUS_TSS);
+                               complete(&idev->msg_complete);
+                               break;
+                       }
+@@ -299,14 +298,19 @@ static irqreturn_t axxia_i2c_isr(int irq, void *_dev)
+ 
+       if (status & MST_STATUS_SCC) {
+               /* Stop completed */
+-              i2c_int_disable(idev, ~0);
++              i2c_int_disable(idev, ~MST_STATUS_TSS);
+               complete(&idev->msg_complete);
+       } else if (status & MST_STATUS_SNS) {
+               /* Transfer done */
+-              i2c_int_disable(idev, ~0);
++              i2c_int_disable(idev, ~MST_STATUS_TSS);
+               if (i2c_m_rd(idev->msg) && idev->msg_xfrd < idev->msg->len)
+                       axxia_i2c_empty_rx_fifo(idev);
+               complete(&idev->msg_complete);
++      } else if (status & MST_STATUS_TSS) {
++              /* Transfer timeout */
++              idev->msg_err = -ETIMEDOUT;
++              i2c_int_disable(idev, ~MST_STATUS_TSS);
++              complete(&idev->msg_complete);
+       } else if (unlikely(status & MST_STATUS_ERR)) {
+               /* Transfer error */
+               i2c_int_disable(idev, ~0);
+@@ -339,10 +343,10 @@ static int axxia_i2c_xfer_msg(struct axxia_i2c_dev *idev, struct i2c_msg *msg)
+       u32 rx_xfer, tx_xfer;
+       u32 addr_1, addr_2;
+       unsigned long time_left;
++      unsigned int wt_value;
+ 
+       idev->msg = msg;
+       idev->msg_xfrd = 0;
+-      idev->msg_err = 0;
+       reinit_completion(&idev->msg_complete);
+ 
+       if (i2c_m_ten(msg)) {
+@@ -382,9 +386,18 @@ static int axxia_i2c_xfer_msg(struct axxia_i2c_dev *idev, struct i2c_msg *msg)
+       else if (axxia_i2c_fill_tx_fifo(idev) != 0)
+               int_mask |= MST_STATUS_TFL;
+ 
++      wt_value = WT_VALUE(readl(idev->base + WAIT_TIMER_CONTROL));
++      /* Disable wait timer temporarly */
++      writel(wt_value, idev->base + WAIT_TIMER_CONTROL);
++      /* Check if timeout error happened */
++      if (idev->msg_err)
++              goto out;
++
+       /* Start manual mode */
+       writel(CMD_MANUAL, idev->base + MST_COMMAND);
+ 
++      writel(WT_EN | wt_value, idev->base + WAIT_TIMER_CONTROL);
++
+       i2c_int_enable(idev, int_mask);
+ 
+       time_left = wait_for_completion_timeout(&idev->msg_complete,
+@@ -395,13 +408,15 @@ static int axxia_i2c_xfer_msg(struct axxia_i2c_dev *idev, struct i2c_msg *msg)
+       if (readl(idev->base + MST_COMMAND) & CMD_BUSY)
+               dev_warn(idev->dev, "busy after xfer\n");
+ 
+-      if (time_left == 0)
++      if (time_left == 0) {
+               idev->msg_err = -ETIMEDOUT;
+-
+-      if (idev->msg_err == -ETIMEDOUT)
+               i2c_recover_bus(&idev->adapter);
++              axxia_i2c_init(idev);
++      }
+ 
+-      if (unlikely(idev->msg_err) && idev->msg_err != -ENXIO)
++out:
++      if (unlikely(idev->msg_err) && idev->msg_err != -ENXIO &&
++                      idev->msg_err != -ETIMEDOUT)
+               axxia_i2c_init(idev);
+ 
+       return idev->msg_err;
+@@ -409,7 +424,7 @@ static int axxia_i2c_xfer_msg(struct axxia_i2c_dev *idev, struct i2c_msg *msg)
+ 
+ static int axxia_i2c_stop(struct axxia_i2c_dev *idev)
+ {
+-      u32 int_mask = MST_STATUS_ERR | MST_STATUS_SCC;
++      u32 int_mask = MST_STATUS_ERR | MST_STATUS_SCC | MST_STATUS_TSS;
+       unsigned long time_left;
+ 
+       reinit_completion(&idev->msg_complete);
+@@ -436,6 +451,9 @@ axxia_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[], int num)
+       int i;
+       int ret = 0;
+ 
++      idev->msg_err = 0;
++      i2c_int_enable(idev, MST_STATUS_TSS);
++
+       for (i = 0; ret == 0 && i < num; ++i)
+               ret = axxia_i2c_xfer_msg(idev, &msgs[i]);
+ 
+diff --git a/drivers/i2c/busses/i2c-scmi.c b/drivers/i2c/busses/i2c-scmi.c
+index efefcfa24a4c..d2178f701b41 100644
+--- a/drivers/i2c/busses/i2c-scmi.c
++++ b/drivers/i2c/busses/i2c-scmi.c
+@@ -364,6 +364,7 @@ static int acpi_smbus_cmi_add(struct acpi_device *device)
+ {
+       struct acpi_smbus_cmi *smbus_cmi;
+       const struct acpi_device_id *id;
++      int ret;
+ 
+       smbus_cmi = kzalloc(sizeof(struct acpi_smbus_cmi), GFP_KERNEL);
+       if (!smbus_cmi)
+@@ -385,8 +386,10 @@ static int acpi_smbus_cmi_add(struct acpi_device *device)
+       acpi_walk_namespace(ACPI_TYPE_METHOD, smbus_cmi->handle, 1,
+                           acpi_smbus_cmi_query_methods, NULL, smbus_cmi, NULL);
+ 
+-      if (smbus_cmi->cap_info == 0)
++      if (smbus_cmi->cap_info == 0) {
++              ret = -ENODEV;
+               goto err;
++      }
+ 
+       snprintf(smbus_cmi->adapter.name, sizeof(smbus_cmi->adapter.name),
+               "SMBus CMI adapter %s",
+@@ -397,7 +400,8 @@ static int acpi_smbus_cmi_add(struct acpi_device *device)
+       smbus_cmi->adapter.class = I2C_CLASS_HWMON | I2C_CLASS_SPD;
+       smbus_cmi->adapter.dev.parent = &device->dev;
+ 
+-      if (i2c_add_adapter(&smbus_cmi->adapter)) {
++      ret = i2c_add_adapter(&smbus_cmi->adapter);
++      if (ret) {
+               dev_err(&device->dev, "Couldn't register adapter!\n");
+               goto err;
+       }
+@@ -407,7 +411,7 @@ static int acpi_smbus_cmi_add(struct acpi_device *device)
+ err:
+       kfree(smbus_cmi);
+       device->driver_data = NULL;
+-      return -EIO;
++      return ret;
+ }
+ 
+ static int acpi_smbus_cmi_remove(struct acpi_device *device)
+diff --git a/drivers/ide/pmac.c b/drivers/ide/pmac.c
+index 0c5d3a99468e..b20025a5a8d9 100644
+--- a/drivers/ide/pmac.c
++++ b/drivers/ide/pmac.c
+@@ -920,6 +920,7 @@ static u8 pmac_ide_cable_detect(ide_hwif_t *hwif)
+       struct device_node *root = of_find_node_by_path("/");
+       const char *model = of_get_property(root, "model", NULL);
+ 
++      of_node_put(root);
+       /* Get cable type from device-tree. */
+       if (cable && !strncmp(cable, "80-", 3)) {
+               /* Some drives fail to detect 80c cable in PowerBook */
+diff --git a/drivers/infiniband/hw/hfi1/user_sdma.c b/drivers/infiniband/hw/hfi1/user_sdma.c
+index 619475c7d761..4c111162d552 100644
+--- a/drivers/infiniband/hw/hfi1/user_sdma.c
++++ b/drivers/infiniband/hw/hfi1/user_sdma.c
+@@ -151,10 +151,6 @@ MODULE_PARM_DESC(sdma_comp_size, "Size of User SDMA completion ring. Default: 12
+ #define SDMA_REQ_HAVE_AHG   1
+ #define SDMA_REQ_HAS_ERROR  2
+ 
+-#define SDMA_PKT_Q_INACTIVE BIT(0)
+-#define SDMA_PKT_Q_ACTIVE   BIT(1)
+-#define SDMA_PKT_Q_DEFERRED BIT(2)
+-
+ /*
+  * Maximum retry attempts to submit a TX request
+  * before putting the process to sleep.
+@@ -408,7 +404,6 @@ int hfi1_user_sdma_alloc_queues(struct hfi1_ctxtdata *uctxt, struct file *fp)
+       pq->ctxt = uctxt->ctxt;
+       pq->subctxt = fd->subctxt;
+       pq->n_max_reqs = hfi1_sdma_comp_ring_size;
+-      pq->state = SDMA_PKT_Q_INACTIVE;
+       atomic_set(&pq->n_reqs, 0);
+       init_waitqueue_head(&pq->wait);
+       atomic_set(&pq->n_locked, 0);
+@@ -491,7 +486,7 @@ int hfi1_user_sdma_free_queues(struct hfi1_filedata *fd)
+               /* Wait until all requests have been freed. */
+               wait_event_interruptible(
+                       pq->wait,
+-                      (ACCESS_ONCE(pq->state) == SDMA_PKT_Q_INACTIVE));
++                      !atomic_read(&pq->n_reqs));
+               kfree(pq->reqs);
+               kfree(pq->req_in_use);
+               kmem_cache_destroy(pq->txreq_cache);
+@@ -527,6 +522,13 @@ static u8 dlid_to_selector(u16 dlid)
+       return mapping[hash];
+ }
+ 
++/**
++ * hfi1_user_sdma_process_request() - Process and start a user sdma request
++ * @fp: valid file pointer
++ * @iovec: array of io vectors to process
++ * @dim: overall iovec array size
++ * @count: number of io vector array entries processed
++ */
+ int hfi1_user_sdma_process_request(struct file *fp, struct iovec *iovec,
+                                  unsigned long dim, unsigned long *count)
+ {
+@@ -768,20 +770,12 @@ int hfi1_user_sdma_process_request(struct file *fp, struct iovec *iovec,
+       }
+ 
+       set_comp_state(pq, cq, info.comp_idx, QUEUED, 0);
++      pq->state = SDMA_PKT_Q_ACTIVE;
+       /* Send the first N packets in the request to buy us some time */
+       ret = user_sdma_send_pkts(req, pcount);
+       if (unlikely(ret < 0 && ret != -EBUSY))
+               goto free_req;
+ 
+-      /*
+-       * It is possible that the SDMA engine would have processed all the
+-       * submitted packets by the time we get here. Therefore, only set
+-       * packet queue state to ACTIVE if there are still uncompleted
+-       * requests.
+-       */
+-      if (atomic_read(&pq->n_reqs))
+-              xchg(&pq->state, SDMA_PKT_Q_ACTIVE);
+-
+       /*
+        * This is a somewhat blocking send implementation.
+        * The driver will block the caller until all packets of the
+@@ -1526,10 +1520,8 @@ static void user_sdma_txreq_cb(struct sdma_txreq *txreq, int status)
+ 
+ static inline void pq_update(struct hfi1_user_sdma_pkt_q *pq)
+ {
+-      if (atomic_dec_and_test(&pq->n_reqs)) {
+-              xchg(&pq->state, SDMA_PKT_Q_INACTIVE);
++      if (atomic_dec_and_test(&pq->n_reqs))
+               wake_up(&pq->wait);
+-      }
+ }
+ 
+ static void user_sdma_free_request(struct user_sdma_request *req, bool unpin)
+diff --git a/drivers/infiniband/hw/hfi1/user_sdma.h b/drivers/infiniband/hw/hfi1/user_sdma.h
+index 39001714f551..09dd843a13de 100644
+--- a/drivers/infiniband/hw/hfi1/user_sdma.h
++++ b/drivers/infiniband/hw/hfi1/user_sdma.h
+@@ -53,6 +53,11 @@
+ 
+ extern uint extended_psn;
+ 
++enum pkt_q_sdma_state {
++      SDMA_PKT_Q_ACTIVE,
++      SDMA_PKT_Q_DEFERRED,
++};
++
+ struct hfi1_user_sdma_pkt_q {
+       struct list_head list;
+       unsigned ctxt;
+@@ -65,7 +70,7 @@ struct hfi1_user_sdma_pkt_q {
+       struct user_sdma_request *reqs;
+       unsigned long *req_in_use;
+       struct iowait busy;
+-      unsigned state;
++      enum pkt_q_sdma_state state;
+       wait_queue_head_t wait;
+       unsigned long unpinned;
+       struct mmu_rb_handler *handler;
+diff --git a/drivers/input/keyboard/omap4-keypad.c b/drivers/input/keyboard/omap4-keypad.c
+index 6639b2b8528a..f78c464899db 100644
+--- a/drivers/input/keyboard/omap4-keypad.c
++++ b/drivers/input/keyboard/omap4-keypad.c
+@@ -60,8 +60,18 @@
+ 
+ /* OMAP4 values */
+ #define OMAP4_VAL_IRQDISABLE          0x0
+-#define OMAP4_VAL_DEBOUNCINGTIME      0x7
+-#define OMAP4_VAL_PVT                 0x7
++
++/*
++ * Errata i689: If a key is released for a time shorter than debounce time,
++ * the keyboard will idle and never detect the key release. The workaround
++ * is to use at least a 12ms debounce time. See omap5432 TRM chapter
++ * "26.4.6.2 Keyboard Controller Timer" for more information.
++ */
++#define OMAP4_KEYPAD_PTV_DIV_128        0x6
++#define OMAP4_KEYPAD_DEBOUNCINGTIME_MS(dbms, ptv)     \
++      ((((dbms) * 1000) / ((1 << ((ptv) + 1)) * (1000000 / 32768))) - 1)
++#define OMAP4_VAL_DEBOUNCINGTIME_16MS                                 \
++      OMAP4_KEYPAD_DEBOUNCINGTIME_MS(16, OMAP4_KEYPAD_PTV_DIV_128)
+ 
+ enum {
+       KBD_REVISION_OMAP4 = 0,
+@@ -181,9 +191,9 @@ static int omap4_keypad_open(struct input_dev *input)
+ 
+       kbd_writel(keypad_data, OMAP4_KBD_CTRL,
+                       OMAP4_DEF_CTRL_NOSOFTMODE |
+-                      (OMAP4_VAL_PVT << OMAP4_DEF_CTRL_PTV_SHIFT));
++                      (OMAP4_KEYPAD_PTV_DIV_128 << OMAP4_DEF_CTRL_PTV_SHIFT));
+       kbd_writel(keypad_data, OMAP4_KBD_DEBOUNCINGTIME,
+-                      OMAP4_VAL_DEBOUNCINGTIME);
++                      OMAP4_VAL_DEBOUNCINGTIME_16MS);
+       /* clear pending interrupts */
+       kbd_write_irqreg(keypad_data, OMAP4_KBD_IRQSTATUS,
+                        kbd_read_irqreg(keypad_data, OMAP4_KBD_IRQSTATUS));
+diff --git a/drivers/mmc/host/omap.c b/drivers/mmc/host/omap.c
+index be3c49fa7382..a4bf14e21b5e 100644
+--- a/drivers/mmc/host/omap.c
++++ b/drivers/mmc/host/omap.c
+@@ -104,6 +104,7 @@ struct mmc_omap_slot {
+       unsigned int            vdd;
+       u16                     saved_con;
+       u16                     bus_mode;
++      u16                     power_mode;
+       unsigned int            fclk_freq;
+ 
+       struct tasklet_struct   cover_tasklet;
+@@ -1157,7 +1158,7 @@ static void mmc_omap_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
+       struct mmc_omap_slot *slot = mmc_priv(mmc);
+       struct mmc_omap_host *host = slot->host;
+       int i, dsor;
+-      int clk_enabled;
++      int clk_enabled, init_stream;
+ 
+       mmc_omap_select_slot(slot, 0);
+ 
+@@ -1167,6 +1168,7 @@ static void mmc_omap_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
+               slot->vdd = ios->vdd;
+ 
+       clk_enabled = 0;
++      init_stream = 0;
+       switch (ios->power_mode) {
+       case MMC_POWER_OFF:
+               mmc_omap_set_power(slot, 0, ios->vdd);
+@@ -1174,13 +1176,17 @@ static void mmc_omap_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
+       case MMC_POWER_UP:
+               /* Cannot touch dsor yet, just power up MMC */
+               mmc_omap_set_power(slot, 1, ios->vdd);
++              slot->power_mode = ios->power_mode;
+               goto exit;
+       case MMC_POWER_ON:
+               mmc_omap_fclk_enable(host, 1);
+               clk_enabled = 1;
+               dsor |= 1 << 11;
++              if (slot->power_mode != MMC_POWER_ON)
++                      init_stream = 1;
+               break;
+       }
++      slot->power_mode = ios->power_mode;
+ 
+       if (slot->bus_mode != ios->bus_mode) {
+               if (slot->pdata->set_bus_mode != NULL)
+@@ -1196,7 +1202,7 @@ static void mmc_omap_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
+       for (i = 0; i < 2; i++)
+               OMAP_MMC_WRITE(host, CON, dsor);
+       slot->saved_con = dsor;
+-      if (ios->power_mode == MMC_POWER_ON) {
++      if (init_stream) {
+               /* worst case at 400kHz, 80 cycles makes 200 microsecs */
+               int usecs = 250;
+ 
+@@ -1234,6 +1240,7 @@ static int mmc_omap_new_slot(struct mmc_omap_host *host, int id)
+       slot->host = host;
+       slot->mmc = mmc;
+       slot->id = id;
++      slot->power_mode = MMC_POWER_UNDEFINED;
+       slot->pdata = &host->pdata->slots[id];
+ 
+       host->slots[id] = slot;
+diff --git a/drivers/net/bonding/bond_3ad.c b/drivers/net/bonding/bond_3ad.c
+index 6dcc42d79cab..1e2ee97b9240 100644
+--- a/drivers/net/bonding/bond_3ad.c
++++ b/drivers/net/bonding/bond_3ad.c
+@@ -2050,6 +2050,9 @@ void bond_3ad_unbind_slave(struct slave *slave)
+                  aggregator->aggregator_identifier);
+ 
+       /* Tell the partner that this port is not suitable for aggregation */
++      port->actor_oper_port_state &= ~AD_STATE_SYNCHRONIZATION;
++      port->actor_oper_port_state &= ~AD_STATE_COLLECTING;
++      port->actor_oper_port_state &= ~AD_STATE_DISTRIBUTING;
+       port->actor_oper_port_state &= ~AD_STATE_AGGREGATION;
+       __update_lacpdu_from_port(port);
+       ad_lacpdu_send(port);
+diff --git a/drivers/net/dsa/mv88e6060.c b/drivers/net/dsa/mv88e6060.c
+index 7ce36dbd9b62..a3607d083332 100644
+--- a/drivers/net/dsa/mv88e6060.c
++++ b/drivers/net/dsa/mv88e6060.c
+@@ -114,8 +114,7 @@ static int mv88e6060_switch_reset(struct dsa_switch *ds)
+       /* Reset the switch. */
+       REG_WRITE(REG_GLOBAL, GLOBAL_ATU_CONTROL,
+                 GLOBAL_ATU_CONTROL_SWRESET |
+-                GLOBAL_ATU_CONTROL_ATUSIZE_1024 |
+-                GLOBAL_ATU_CONTROL_ATE_AGE_5MIN);
++                GLOBAL_ATU_CONTROL_LEARNDIS);
+ 
+       /* Wait up to one second for reset to complete. */
+       timeout = jiffies + 1 * HZ;
+@@ -140,13 +139,10 @@ static int mv88e6060_setup_global(struct dsa_switch *ds)
+        */
+       REG_WRITE(REG_GLOBAL, GLOBAL_CONTROL, GLOBAL_CONTROL_MAX_FRAME_1536);
+ 
+-      /* Enable automatic address learning, set the address
+-       * database size to 1024 entries, and set the default aging
+-       * time to 5 minutes.
++      /* Disable automatic address learning.
+        */
+       REG_WRITE(REG_GLOBAL, GLOBAL_ATU_CONTROL,
+-                GLOBAL_ATU_CONTROL_ATUSIZE_1024 |
+-                GLOBAL_ATU_CONTROL_ATE_AGE_5MIN);
++                GLOBAL_ATU_CONTROL_LEARNDIS);
+ 
+       return 0;
+ }
+diff --git a/drivers/net/ethernet/freescale/fman/fman.c b/drivers/net/ethernet/freescale/fman/fman.c
+index dafd9e1baba2..380c4a2f6516 100644
+--- a/drivers/net/ethernet/freescale/fman/fman.c
++++ b/drivers/net/ethernet/freescale/fman/fman.c
+@@ -2817,7 +2817,7 @@ static struct fman *read_dts_node(struct platform_device *of_dev)
+       if (!muram_node) {
+               dev_err(&of_dev->dev, "%s: could not find MURAM node\n",
+                       __func__);
+-              goto fman_node_put;
++              goto fman_free;
+       }
+ 
+       err = of_address_to_resource(muram_node, 0,
+@@ -2826,11 +2826,10 @@ static struct fman *read_dts_node(struct platform_device *of_dev)
+               of_node_put(muram_node);
+               dev_err(&of_dev->dev, "%s: of_address_to_resource() = %d\n",
+                       __func__, err);
+-              goto fman_node_put;
++              goto fman_free;
+       }
+ 
+       of_node_put(muram_node);
+-      of_node_put(fm_node);
+ 
+       err = devm_request_irq(&of_dev->dev, irq, fman_irq, 0, "fman", fman);
+       if (err < 0) {
+diff --git a/drivers/net/ethernet/mellanox/mlx4/Kconfig b/drivers/net/ethernet/mellanox/mlx4/Kconfig
+index 5098e7f21987..a0eb4e4bc525 100644
+--- a/drivers/net/ethernet/mellanox/mlx4/Kconfig
++++ b/drivers/net/ethernet/mellanox/mlx4/Kconfig
+@@ -5,7 +5,7 @@
+ config MLX4_EN
+       tristate "Mellanox Technologies 1/10/40Gbit Ethernet support"
+       depends on MAY_USE_DEVLINK
+-      depends on PCI
++      depends on PCI && NETDEVICES && ETHERNET && INET
+       select MLX4_CORE
+       select PTP_1588_CLOCK
+       ---help---
+diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c
+index 0852a1aad075..780acf23fd19 100644
+--- a/drivers/net/wireless/mac80211_hwsim.c
++++ b/drivers/net/wireless/mac80211_hwsim.c
+@@ -3403,16 +3403,16 @@ static int __init init_mac80211_hwsim(void)
+       if (err)
+               goto out_unregister_pernet;
+ 
++      err = hwsim_init_netlink();
++      if (err)
++              goto out_unregister_driver;
++
+       hwsim_class = class_create(THIS_MODULE, "mac80211_hwsim");
+       if (IS_ERR(hwsim_class)) {
+               err = PTR_ERR(hwsim_class);
+-              goto out_unregister_driver;
++              goto out_exit_netlink;
+       }
+ 
+-      err = hwsim_init_netlink();
+-      if (err < 0)
+-              goto out_unregister_driver;
+-
+       for (i = 0; i < radios; i++) {
+               struct hwsim_new_radio_params param = { 0 };
+ 
+@@ -3518,6 +3518,8 @@ out_free_mon:
+       free_netdev(hwsim_mon);
+ out_free_radios:
+       mac80211_hwsim_free();
++out_exit_netlink:
++      hwsim_exit_netlink();
+ out_unregister_driver:
+       platform_driver_unregister(&mac80211_hwsim_driver);
+ out_unregister_pernet:
+diff --git a/drivers/nvme/target/rdma.c b/drivers/nvme/target/rdma.c
+index 2dfd877974d7..486393fa4f3e 100644
+--- a/drivers/nvme/target/rdma.c
++++ b/drivers/nvme/target/rdma.c
+@@ -524,6 +524,7 @@ static void nvmet_rdma_send_done(struct ib_cq *cq, struct ib_wc *wc)
+ {
+       struct nvmet_rdma_rsp *rsp =
+               container_of(wc->wr_cqe, struct nvmet_rdma_rsp, send_cqe);
++      struct nvmet_rdma_queue *queue = cq->cq_context;
+ 
+       nvmet_rdma_release_rsp(rsp);
+ 
+@@ -531,7 +532,7 @@ static void nvmet_rdma_send_done(struct ib_cq *cq, struct ib_wc *wc)
+                    wc->status != IB_WC_WR_FLUSH_ERR)) {
+               pr_err("SEND for CQE 0x%p failed with status %s (%d).\n",
+                       wc->wr_cqe, ib_wc_status_msg(wc->status), wc->status);
+-              nvmet_rdma_error_comp(rsp->queue);
++              nvmet_rdma_error_comp(queue);
+       }
+ }
+ 
+diff --git a/drivers/pinctrl/sunxi/pinctrl-sun8i-a83t.c b/drivers/pinctrl/sunxi/pinctrl-sun8i-a83t.c
+index a7c81e988656..383977ea3a3c 100644
+--- a/drivers/pinctrl/sunxi/pinctrl-sun8i-a83t.c
++++ b/drivers/pinctrl/sunxi/pinctrl-sun8i-a83t.c
+@@ -568,7 +568,7 @@ static const struct sunxi_desc_pin sun8i_a83t_pins[] = {
+       SUNXI_PIN(SUNXI_PINCTRL_PIN(H, 11),
+                 SUNXI_FUNCTION(0x0, "gpio_in"),
+                 SUNXI_FUNCTION(0x1, "gpio_out"),
+-                SUNXI_FUNCTION_IRQ_BANK(0x6, 2, 1)),  /* PH_EINT11 */
++                SUNXI_FUNCTION_IRQ_BANK(0x6, 2, 11)), /* PH_EINT11 */
+ };
+ 
+ static const struct sunxi_pinctrl_desc sun8i_a83t_pinctrl_data = {
+diff --git a/drivers/rtc/rtc-snvs.c b/drivers/rtc/rtc-snvs.c
+index 3e8fd33c2576..71eee39520f0 100644
+--- a/drivers/rtc/rtc-snvs.c
++++ b/drivers/rtc/rtc-snvs.c
+@@ -47,49 +47,83 @@ struct snvs_rtc_data {
+       struct clk *clk;
+ };
+ 
++/* Read 64 bit timer register, which could be in inconsistent state */
++static u64 rtc_read_lpsrt(struct snvs_rtc_data *data)
++{
++      u32 msb, lsb;
++
++      regmap_read(data->regmap, data->offset + SNVS_LPSRTCMR, &msb);
++      regmap_read(data->regmap, data->offset + SNVS_LPSRTCLR, &lsb);
++      return (u64)msb << 32 | lsb;
++}
++
++/* Read the secure real time counter, taking care to deal with the cases of the
++ * counter updating while being read.
++ */
+ static u32 rtc_read_lp_counter(struct snvs_rtc_data *data)
+ {
+       u64 read1, read2;
+-      u32 val;
++      unsigned int timeout = 100;
+ 
++      /* As expected, the registers might update between the read of the LSB
++       * reg and the MSB reg.  It's also possible that one register might be
++       * in partially modified state as well.
++       */
++      read1 = rtc_read_lpsrt(data);
+       do {
+-              regmap_read(data->regmap, data->offset + SNVS_LPSRTCMR, &val);
+-              read1 = val;
+-              read1 <<= 32;
+-              regmap_read(data->regmap, data->offset + SNVS_LPSRTCLR, &val);
+-              read1 |= val;
+-
+-              regmap_read(data->regmap, data->offset + SNVS_LPSRTCMR, &val);
+-              read2 = val;
+-              read2 <<= 32;
+-              regmap_read(data->regmap, data->offset + SNVS_LPSRTCLR, &val);
+-              read2 |= val;
+-      } while (read1 != read2);
++              read2 = read1;
++              read1 = rtc_read_lpsrt(data);
++      } while (read1 != read2 && --timeout);
++      if (!timeout)
++              dev_err(&data->rtc->dev, "Timeout trying to get valid LPSRT Counter read\n");
+ 
+       /* Convert 47-bit counter to 32-bit raw second count */
+       return (u32) (read1 >> CNTR_TO_SECS_SH);
+ }
+ 
+-static void rtc_write_sync_lp(struct snvs_rtc_data *data)
++/* Just read the lsb from the counter, dealing with inconsistent state */
++static int rtc_read_lp_counter_lsb(struct snvs_rtc_data *data, u32 *lsb)
+ {
+-      u32 count1, count2, count3;
+-      int i;
+-
+-      /* Wait for 3 CKIL cycles */
+-      for (i = 0; i < 3; i++) {
+-              do {
+-                      regmap_read(data->regmap, data->offset + SNVS_LPSRTCLR, 
&count1);
+-                      regmap_read(data->regmap, data->offset + SNVS_LPSRTCLR, 
&count2);
+-              } while (count1 != count2);
+-
+-              /* Now wait until counter value changes */
+-              do {
+-                      do {
+-                              regmap_read(data->regmap, data->offset + SNVS_LPSRTCLR, &count2);
+-                              regmap_read(data->regmap, data->offset + SNVS_LPSRTCLR, &count3);
+-                      } while (count2 != count3);
+-              } while (count3 == count1);
++      u32 count1, count2;
++      unsigned int timeout = 100;
++
++      regmap_read(data->regmap, data->offset + SNVS_LPSRTCLR, &count1);
++      do {
++              count2 = count1;
++              regmap_read(data->regmap, data->offset + SNVS_LPSRTCLR, &count1);
++      } while (count1 != count2 && --timeout);
++      if (!timeout) {
++              dev_err(&data->rtc->dev, "Timeout trying to get valid LPSRT Counter read\n");
++              return -ETIMEDOUT;
+       }
++
++      *lsb = count1;
++      return 0;
++}
++
++static int rtc_write_sync_lp(struct snvs_rtc_data *data)
++{
++      u32 count1, count2;
++      u32 elapsed;
++      unsigned int timeout = 1000;
++      int ret;
++
++      ret = rtc_read_lp_counter_lsb(data, &count1);
++      if (ret)
++              return ret;
++
++      /* Wait for 3 CKIL cycles, about 61.0-91.5 µs */
++      do {
++              ret = rtc_read_lp_counter_lsb(data, &count2);
++              if (ret)
++                      return ret;
++              elapsed = count2 - count1; /* wrap around _is_ handled! */
++      } while (elapsed < 3 && --timeout);
++      if (!timeout) {
++              dev_err(&data->rtc->dev, "Timeout waiting for LPSRT Counter to change\n");
++              return -ETIMEDOUT;
++      }
++      return 0;
+ }
+ 
+ static int snvs_rtc_enable(struct snvs_rtc_data *data, bool enable)
+@@ -173,9 +207,7 @@ static int snvs_rtc_alarm_irq_enable(struct device *dev, unsigned int enable)
+                          (SNVS_LPCR_LPTA_EN | SNVS_LPCR_LPWUI_EN),
+                          enable ? (SNVS_LPCR_LPTA_EN | SNVS_LPCR_LPWUI_EN) : 0);
+ 
+-      rtc_write_sync_lp(data);
+-
+-      return 0;
++      return rtc_write_sync_lp(data);
+ }
+ 
+ static int snvs_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alrm)
+@@ -183,10 +215,14 @@ static int snvs_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alrm)
+       struct snvs_rtc_data *data = dev_get_drvdata(dev);
+       struct rtc_time *alrm_tm = &alrm->time;
+       unsigned long time;
++      int ret;
+ 
+       rtc_tm_to_time(alrm_tm, &time);
+ 
+       regmap_update_bits(data->regmap, data->offset + SNVS_LPCR, SNVS_LPCR_LPTA_EN, 0);
++      ret = rtc_write_sync_lp(data);
++      if (ret)
++              return ret;
+       regmap_write(data->regmap, data->offset + SNVS_LPTAR, time);
+ 
+       /* Clear alarm interrupt status bit */
+diff --git a/drivers/sbus/char/display7seg.c b/drivers/sbus/char/display7seg.c
+index 33fbe8249fd5..044cffbc45e8 100644
+--- a/drivers/sbus/char/display7seg.c
++++ b/drivers/sbus/char/display7seg.c
+@@ -221,6 +221,7 @@ static int d7s_probe(struct platform_device *op)
+       dev_set_drvdata(&op->dev, p);
+       d7s_device = p;
+       err = 0;
++      of_node_put(opts);
+ 
+ out:
+       return err;
+diff --git a/drivers/sbus/char/envctrl.c b/drivers/sbus/char/envctrl.c
+index 5609b602c54d..baa9b322520b 100644
+--- a/drivers/sbus/char/envctrl.c
++++ b/drivers/sbus/char/envctrl.c
+@@ -910,8 +910,10 @@ static void envctrl_init_i2c_child(struct device_node *dp,
+                       for (len = 0; len < PCF8584_MAX_CHANNELS; ++len) {
+                               pchild->mon_type[len] = ENVCTRL_NOMON;
+                       }
++                      of_node_put(root_node);
+                       return;
+               }
++              of_node_put(root_node);
+       }
+ 
+       /* Get the monitor channels. */
+diff --git a/drivers/scsi/libiscsi.c b/drivers/scsi/libiscsi.c
+index cc8f2a7c2463..c79743de48f9 100644
+--- a/drivers/scsi/libiscsi.c
++++ b/drivers/scsi/libiscsi.c
+@@ -2414,8 +2414,8 @@ int iscsi_eh_session_reset(struct scsi_cmnd *sc)
+ failed:
+               ISCSI_DBG_EH(session,
+                            "failing session reset: Could not log back into "
+-                           "%s, %s [age %d]\n", session->targetname,
+-                           conn->persistent_address, session->age);
++                           "%s [age %d]\n", session->targetname,
++                           session->age);
+               spin_unlock_bh(&session->frwd_lock);
+               mutex_unlock(&session->eh_mutex);
+               return FAILED;
+diff --git a/drivers/scsi/vmw_pvscsi.c b/drivers/scsi/vmw_pvscsi.c
+index 874e9f085326..fcfbe2dcd025 100644
+--- a/drivers/scsi/vmw_pvscsi.c
++++ b/drivers/scsi/vmw_pvscsi.c
+@@ -1233,8 +1231,6 @@ static void pvscsi_shutdown_intr(struct pvscsi_adapter *adapter)
+ 
+ static void pvscsi_release_resources(struct pvscsi_adapter *adapter)
+ {
+-      pvscsi_shutdown_intr(adapter);
+-
+       if (adapter->workqueue)
+               destroy_workqueue(adapter->workqueue);
+ 
+@@ -1563,6 +1561,7 @@ static int pvscsi_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+ out_reset_adapter:
+       ll_adapter_reset(adapter);
+ out_release_resources:
++      pvscsi_shutdown_intr(adapter);
+       pvscsi_release_resources(adapter);
+       scsi_host_put(host);
+ out_disable_device:
+@@ -1571,6 +1570,7 @@ out_disable_device:
+       return error;
+ 
+ out_release_resources_and_disable:
++      pvscsi_shutdown_intr(adapter);
+       pvscsi_release_resources(adapter);
+       goto out_disable_device;
+ }
+diff --git a/drivers/tty/serial/suncore.c b/drivers/tty/serial/suncore.c
+index 127472bd6a7c..209f314745ab 100644
+--- a/drivers/tty/serial/suncore.c
++++ b/drivers/tty/serial/suncore.c
+@@ -111,6 +111,7 @@ void sunserial_console_termios(struct console *con, struct device_node *uart_dp)
+               mode = of_get_property(dp, mode_prop, NULL);
+               if (!mode)
+                       mode = "9600,8,n,1,-";
++              of_node_put(dp);
+       }
+ 
+       cflag = CREAD | HUPCL | CLOCAL;
+diff --git a/drivers/vhost/vsock.c b/drivers/vhost/vsock.c
+index f800f89068db..46f966d7c328 100644
+--- a/drivers/vhost/vsock.c
++++ b/drivers/vhost/vsock.c
+@@ -559,13 +559,21 @@ static void vhost_vsock_reset_orphans(struct sock *sk)
+        * executing.
+        */
+ 
+-      if (!vhost_vsock_get(vsk->remote_addr.svm_cid)) {
+-              sock_set_flag(sk, SOCK_DONE);
+-              vsk->peer_shutdown = SHUTDOWN_MASK;
+-              sk->sk_state = SS_UNCONNECTED;
+-              sk->sk_err = ECONNRESET;
+-              sk->sk_error_report(sk);
+-      }
++      /* If the peer is still valid, no need to reset connection */
++      if (vhost_vsock_get(vsk->remote_addr.svm_cid))
++              return;
++
++      /* If the close timeout is pending, let it expire.  This avoids races
++       * with the timeout callback.
++       */
++      if (vsk->close_work_scheduled)
++              return;
++
++      sock_set_flag(sk, SOCK_DONE);
++      vsk->peer_shutdown = SHUTDOWN_MASK;
++      sk->sk_state = SS_UNCONNECTED;
++      sk->sk_err = ECONNRESET;
++      sk->sk_error_report(sk);
+ }
+ 
+ static int vhost_vsock_dev_release(struct inode *inode, struct file *file)
+diff --git a/fs/aio.c b/fs/aio.c
+index b1170a7affe2..c3fc80294397 100644
+--- a/fs/aio.c
++++ b/fs/aio.c
+@@ -40,6 +40,7 @@
+ #include <linux/ramfs.h>
+ #include <linux/percpu-refcount.h>
+ #include <linux/mount.h>
++#include <linux/nospec.h>
+ 
+ #include <asm/kmap_types.h>
+ #include <asm/uaccess.h>
+@@ -1071,6 +1072,7 @@ static struct kioctx *lookup_ioctx(unsigned long ctx_id)
+       if (!table || id >= table->nr)
+               goto out;
+ 
++      id = array_index_nospec(id, table->nr);
+       ctx = rcu_dereference(table->table[id]);
+       if (ctx && ctx->user_id == ctx_id) {
+               if (percpu_ref_tryget_live(&ctx->users))
+diff --git a/fs/cifs/Kconfig b/fs/cifs/Kconfig
+index e7b478b49985..8bef27b8f85d 100644
+--- a/fs/cifs/Kconfig
++++ b/fs/cifs/Kconfig
+@@ -111,7 +111,7 @@ config CIFS_XATTR
+ 
+ config CIFS_POSIX
+         bool "CIFS POSIX Extensions"
+-        depends on CIFS_XATTR
++        depends on CIFS && CIFS_ALLOW_INSECURE_LEGACY && CIFS_XATTR
+         help
+           Enabling this option will cause the cifs client to attempt to
+         negotiate a newer dialect with servers, such as Samba 3.0.5
+diff --git a/fs/nfs/direct.c b/fs/nfs/direct.c
+index 1ab91124a93e..53f0012ace42 100644
+--- a/fs/nfs/direct.c
++++ b/fs/nfs/direct.c
+@@ -98,8 +98,11 @@ struct nfs_direct_req {
+       struct pnfs_ds_commit_info ds_cinfo;    /* Storage for cinfo */
+       struct work_struct      work;
+       int                     flags;
++      /* for write */
+#define NFS_ODIRECT_DO_COMMIT         (1)     /* an unstable reply was received */
+ #define NFS_ODIRECT_RESCHED_WRITES    (2)     /* write verification failed */
++      /* for read */
++#define NFS_ODIRECT_SHOULD_DIRTY      (3)     /* dirty user-space page after read */
+       struct nfs_writeverf    verf;           /* unstable write verifier */
+ };
+ 
+@@ -422,7 +425,8 @@ static void nfs_direct_read_completion(struct nfs_pgio_header *hdr)
+               struct nfs_page *req = nfs_list_entry(hdr->pages.next);
+               struct page *page = req->wb_page;
+ 
+-              if (!PageCompound(page) && bytes < hdr->good_bytes)
++              if (!PageCompound(page) && bytes < hdr->good_bytes &&
++                  (dreq->flags == NFS_ODIRECT_SHOULD_DIRTY))
+                       set_page_dirty(page);
+               bytes += req->wb_bytes;
+               nfs_list_remove_request(req);
+@@ -597,6 +601,9 @@ ssize_t nfs_file_direct_read(struct kiocb *iocb, struct iov_iter *iter)
+       if (!is_sync_kiocb(iocb))
+               dreq->iocb = iocb;
+ 
++      if (iter_is_iovec(iter))
++              dreq->flags = NFS_ODIRECT_SHOULD_DIRTY;
++
+       nfs_start_io_direct(inode);
+ 
+       NFS_I(inode)->read_io += count;
+diff --git a/include/asm-generic/qspinlock_types.h b/include/asm-generic/qspinlock_types.h
+index 034acd0c4956..d10f1e7d6ba8 100644
+--- a/include/asm-generic/qspinlock_types.h
++++ b/include/asm-generic/qspinlock_types.h
+@@ -29,13 +29,41 @@
+ #endif
+ 
+ typedef struct qspinlock {
+-      atomic_t        val;
++      union {
++              atomic_t val;
++
++              /*
++               * By using the whole 2nd least significant byte for the
++               * pending bit, we can allow better optimization of the lock
++               * acquisition for the pending bit holder.
++               */
++#ifdef __LITTLE_ENDIAN
++              struct {
++                      u8      locked;
++                      u8      pending;
++              };
++              struct {
++                      u16     locked_pending;
++                      u16     tail;
++              };
++#else
++              struct {
++                      u16     tail;
++                      u16     locked_pending;
++              };
++              struct {
++                      u8      reserved[2];
++                      u8      pending;
++                      u8      locked;
++              };
++#endif
++      };
+ } arch_spinlock_t;
+ 
+ /*
+  * Initializier
+  */
+-#define       __ARCH_SPIN_LOCK_UNLOCKED       { ATOMIC_INIT(0) }
++#define       __ARCH_SPIN_LOCK_UNLOCKED       { { .val = ATOMIC_INIT(0) } }
+ 
+ /*
+  * Bitfields in the atomic value:
+diff --git a/include/linux/compat.h b/include/linux/compat.h
+index d8535a430caf..fab35daf8759 100644
+--- a/include/linux/compat.h
++++ b/include/linux/compat.h
+@@ -67,6 +67,9 @@ typedef struct compat_sigaltstack {
+       compat_size_t                   ss_size;
+ } compat_stack_t;
+ #endif
++#ifndef COMPAT_MINSIGSTKSZ
++#define COMPAT_MINSIGSTKSZ    MINSIGSTKSZ
++#endif
+ 
+ #define compat_jiffies_to_clock_t(x)  \
+               (((unsigned long)(x) * COMPAT_USER_HZ) / HZ)
+diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
+index 1438b7396cb4..335c00209f74 100644
+--- a/kernel/bpf/verifier.c
++++ b/kernel/bpf/verifier.c
+@@ -2919,6 +2919,9 @@ static int do_check(struct bpf_verifier_env *env)
+                       goto process_bpf_exit;
+               }
+ 
++              if (signal_pending(current))
++                      return -EAGAIN;
++
+               if (need_resched())
+                       cond_resched();
+ 
+diff --git a/kernel/locking/qspinlock.c b/kernel/locking/qspinlock.c
+index a72f5df643f8..0ed478e10071 100644
+--- a/kernel/locking/qspinlock.c
++++ b/kernel/locking/qspinlock.c
+@@ -75,6 +75,18 @@
+ #define MAX_NODES     4
+ #endif
+ 
++/*
++ * The pending bit spinning loop count.
++ * This heuristic is used to limit the number of lockword accesses
++ * made by atomic_cond_read_relaxed when waiting for the lock to
++ * transition out of the "== _Q_PENDING_VAL" state. We don't spin
++ * indefinitely because there's no guarantee that we'll make forward
++ * progress.
++ */
++#ifndef _Q_PENDING_LOOPS
++#define _Q_PENDING_LOOPS      1
++#endif
++
+ /*
+  * Per-CPU queue node structures; we can never have more than 4 nested
+  * contexts: task, softirq, hardirq, nmi.
+@@ -113,41 +125,18 @@ static inline __pure struct mcs_spinlock *decode_tail(u32 tail)
+ 
+ #define _Q_LOCKED_PENDING_MASK (_Q_LOCKED_MASK | _Q_PENDING_MASK)
+ 
+-/*
+- * By using the whole 2nd least significant byte for the pending bit, we
+- * can allow better optimization of the lock acquisition for the pending
+- * bit holder.
++#if _Q_PENDING_BITS == 8
++/**
++ * clear_pending - clear the pending bit.
++ * @lock: Pointer to queued spinlock structure
+  *
+- * This internal structure is also used by the set_locked function which
+- * is not restricted to _Q_PENDING_BITS == 8.
++ * *,1,* -> *,0,*
+  */
+-struct __qspinlock {
+-      union {
+-              atomic_t val;
+-#ifdef __LITTLE_ENDIAN
+-              struct {
+-                      u8      locked;
+-                      u8      pending;
+-              };
+-              struct {
+-                      u16     locked_pending;
+-                      u16     tail;
+-              };
+-#else
+-              struct {
+-                      u16     tail;
+-                      u16     locked_pending;
+-              };
+-              struct {
+-                      u8      reserved[2];
+-                      u8      pending;
+-                      u8      locked;
+-              };
+-#endif
+-      };
+-};
++static __always_inline void clear_pending(struct qspinlock *lock)
++{
++      WRITE_ONCE(lock->pending, 0);
++}
+ 
+-#if _Q_PENDING_BITS == 8
+ /**
+  * clear_pending_set_locked - take ownership and clear the pending bit.
+  * @lock: Pointer to queued spinlock structure
+@@ -158,9 +147,7 @@ struct __qspinlock {
+  */
+ static __always_inline void clear_pending_set_locked(struct qspinlock *lock)
+ {
+-      struct __qspinlock *l = (void *)lock;
+-
+-      WRITE_ONCE(l->locked_pending, _Q_LOCKED_VAL);
++      WRITE_ONCE(lock->locked_pending, _Q_LOCKED_VAL);
+ }
+ 
+ /*
+@@ -169,24 +156,33 @@ static __always_inline void clear_pending_set_locked(struct qspinlock *lock)
+  * @tail : The new queue tail code word
+  * Return: The previous queue tail code word
+  *
+- * xchg(lock, tail)
++ * xchg(lock, tail), which heads an address dependency
+  *
+  * p,*,* -> n,*,* ; prev = xchg(lock, node)
+  */
+ static __always_inline u32 xchg_tail(struct qspinlock *lock, u32 tail)
+ {
+-      struct __qspinlock *l = (void *)lock;
+-
+       /*
+        * Use release semantics to make sure that the MCS node is properly
+        * initialized before changing the tail code.
+        */
+-      return (u32)xchg_release(&l->tail,
++      return (u32)xchg_release(&lock->tail,
+                                tail >> _Q_TAIL_OFFSET) << _Q_TAIL_OFFSET;
+ }
+ 
+ #else /* _Q_PENDING_BITS == 8 */
+ 
++/**
++ * clear_pending - clear the pending bit.
++ * @lock: Pointer to queued spinlock structure
++ *
++ * *,1,* -> *,0,*
++ */
++static __always_inline void clear_pending(struct qspinlock *lock)
++{
++      atomic_andnot(_Q_PENDING_VAL, &lock->val);
++}
++
+ /**
+  * clear_pending_set_locked - take ownership and clear the pending bit.
+  * @lock: Pointer to queued spinlock structure
+@@ -228,6 +224,20 @@ static __always_inline u32 xchg_tail(struct qspinlock *lock, u32 tail)
+ }
+ #endif /* _Q_PENDING_BITS == 8 */
+ 
++/**
++ * queued_fetch_set_pending_acquire - fetch the whole lock value and set pending
++ * @lock : Pointer to queued spinlock structure
++ * Return: The previous lock value
++ *
++ * *,*,* -> *,1,*
++ */
++#ifndef queued_fetch_set_pending_acquire
++static __always_inline u32 queued_fetch_set_pending_acquire(struct qspinlock *lock)
++{
++      return atomic_fetch_or_acquire(_Q_PENDING_VAL, &lock->val);
++}
++#endif
++
+ /**
+  * set_locked - Set the lock bit and own the lock
+  * @lock: Pointer to queued spinlock structure
+@@ -236,9 +246,7 @@ static __always_inline u32 xchg_tail(struct qspinlock *lock, u32 tail)
+  */
+ static __always_inline void set_locked(struct qspinlock *lock)
+ {
+-      struct __qspinlock *l = (void *)lock;
+-
+-      WRITE_ONCE(l->locked, _Q_LOCKED_VAL);
++      WRITE_ONCE(lock->locked, _Q_LOCKED_VAL);
+ }
+ 
+ 
+@@ -410,7 +418,7 @@ EXPORT_SYMBOL(queued_spin_unlock_wait);
+ void queued_spin_lock_slowpath(struct qspinlock *lock, u32 val)
+ {
+       struct mcs_spinlock *prev, *next, *node;
+-      u32 new, old, tail;
++      u32 old, tail;
+       int idx;
+ 
+       BUILD_BUG_ON(CONFIG_NR_CPUS >= (1U << _Q_TAIL_CPU_BITS));
+@@ -422,65 +430,58 @@ void queued_spin_lock_slowpath(struct qspinlock *lock, u32 val)
+               return;
+ 
+       /*
+-       * wait for in-progress pending->locked hand-overs
++       * Wait for in-progress pending->locked hand-overs with a bounded
++       * number of spins so that we guarantee forward progress.
+        *
+        * 0,1,0 -> 0,0,1
+        */
+       if (val == _Q_PENDING_VAL) {
+-              while ((val = atomic_read(&lock->val)) == _Q_PENDING_VAL)
+-                      cpu_relax();
++              int cnt = _Q_PENDING_LOOPS;
++              val = smp_cond_load_acquire(&lock->val.counter,
++                                             (VAL != _Q_PENDING_VAL) || !cnt--);
+       }
+ 
++      /*
++       * If we observe any contention; queue.
++       */
++      if (val & ~_Q_LOCKED_MASK)
++              goto queue;
++
+       /*
+        * trylock || pending
+        *
+        * 0,0,0 -> 0,0,1 ; trylock
+        * 0,0,1 -> 0,1,1 ; pending
+        */
+-      for (;;) {
+-              /*
+-               * If we observe any contention; queue.
+-               */
+-              if (val & ~_Q_LOCKED_MASK)
+-                      goto queue;
+-
+-              new = _Q_LOCKED_VAL;
+-              if (val == new)
+-                      new |= _Q_PENDING_VAL;
+-
+-              /*
+-               * Acquire semantic is required here as the function may
+-               * return immediately if the lock was free.
+-               */
+-              old = atomic_cmpxchg_acquire(&lock->val, val, new);
+-              if (old == val)
+-                      break;
+-
+-              val = old;
+-      }
++      val = queued_fetch_set_pending_acquire(lock);
+ 
+       /*
+-       * we won the trylock
++       * If we observe any contention; undo and queue.
+        */
+-      if (new == _Q_LOCKED_VAL)
+-              return;
++      if (unlikely(val & ~_Q_LOCKED_MASK)) {
++              if (!(val & _Q_PENDING_MASK))
++                      clear_pending(lock);
++              goto queue;
++      }
+ 
+       /*
+-       * we're pending, wait for the owner to go away.
++       * We're pending, wait for the owner to go away.
+        *
+-       * *,1,1 -> *,1,0
++       * 0,1,1 -> 0,1,0
+        *
+        * this wait loop must be a load-acquire such that we match the
+        * store-release that clears the locked bit and create lock
+-       * sequentiality; this is because not all clear_pending_set_locked()
+-       * implementations imply full barriers.
++       * sequentiality; this is because not all
++       * clear_pending_set_locked() implementations imply full
++       * barriers.
+        */
+-      smp_cond_load_acquire(&lock->val.counter, !(VAL & _Q_LOCKED_MASK));
++      if (val & _Q_LOCKED_MASK)
++              smp_cond_load_acquire(&lock->val.counter, !(VAL & _Q_LOCKED_MASK));
+ 
+       /*
+        * take ownership and clear the pending bit.
+        *
+-       * *,1,0 -> *,0,1
++       * 0,1,0 -> 0,0,1
+        */
+       clear_pending_set_locked(lock);
+       return;
+@@ -532,16 +533,15 @@ queue:
+        */
+       if (old & _Q_TAIL_MASK) {
+               prev = decode_tail(old);
++
+               /*
+-               * The above xchg_tail() is also a load of @lock which generates,
+-               * through decode_tail(), a pointer.
+-               *
+-               * The address dependency matches the RELEASE of xchg_tail()
+-               * such that the access to @prev must happen after.
++               * We must ensure that the stores to @node are observed before
++               * the write to prev->next. The address dependency from
++               * xchg_tail is not sufficient to ensure this because the read
++               * component of xchg_tail is unordered with respect to the
++               * initialisation of @node.
+                */
+-              smp_read_barrier_depends();
+-
+-              WRITE_ONCE(prev->next, node);
++              smp_store_release(&prev->next, node);
+ 
+               pv_wait_node(node, prev);
+               arch_mcs_spin_lock_contended(&node->locked);
+@@ -588,30 +588,27 @@ locked:
+        * claim the lock:
+        *
+        * n,0,0 -> 0,0,1 : lock, uncontended
+-       * *,0,0 -> *,0,1 : lock, contended
++       * *,*,0 -> *,*,1 : lock, contended
+        *
+-       * If the queue head is the only one in the queue (lock value == tail),
+-       * clear the tail code and grab the lock. Otherwise, we only need
+-       * to grab the lock.
++       * If the queue head is the only one in the queue (lock value == tail)
++       * and nobody is pending, clear the tail code and grab the lock.
++       * Otherwise, we only need to grab the lock.
+        */
+-      for (;;) {
+-              /* In the PV case we might already have _Q_LOCKED_VAL set */
+-              if ((val & _Q_TAIL_MASK) != tail) {
+-                      set_locked(lock);
+-                      break;
+-              }
++
++      /* In the PV case we might already have _Q_LOCKED_VAL set */
++      if ((val & _Q_TAIL_MASK) == tail) {
+               /*
+                * The smp_cond_load_acquire() call above has provided the
+-               * necessary acquire semantics required for locking. At most
+-               * two iterations of this loop may be ran.
++               * necessary acquire semantics required for locking.
+                */
+               old = atomic_cmpxchg_relaxed(&lock->val, val, _Q_LOCKED_VAL);
+               if (old == val)
+-                      goto release;   /* No contention */
+-
+-              val = old;
++                      goto release; /* No contention */
+       }
+ 
++      /* Either somebody is queued behind us or _Q_PENDING_VAL is set */
++      set_locked(lock);
++
+       /*
+        * contended path; wait for next if not observed yet, release.
+        */
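Taken together, the qspinlock.c hunks above replace the cmpxchg retry loop with a single queued_fetch_set_pending_acquire(), bound the wait for a pending->locked hand-over with _Q_PENDING_LOOPS, and undo the pending bit before queueing when contention is observed. A hedged user-space sketch of that fast path using C11 atomics (the constants, the flat 32-bit layout and the busy-wait are simplifications standing in for the kernel's atomic_t helpers and smp_cond_load_acquire(); queueing itself is not shown):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>

#define Q_LOCKED_VAL	(1u << 0)	/* bits 0-7:  locked byte  */
#define Q_PENDING_VAL	(1u << 8)	/* bits 8-15: pending byte */
#define Q_LOCKED_MASK	0xffu
#define Q_PENDING_LOOPS	1		/* bounded hand-over wait, as above */

static bool pending_fastpath(_Atomic uint32_t *lock)
{
	uint32_t val = atomic_load_explicit(lock, memory_order_relaxed);
	int cnt = Q_PENDING_LOOPS;

	/* Wait a bounded number of times for a pending->locked hand-over. */
	while (val == Q_PENDING_VAL && cnt--)
		val = atomic_load_explicit(lock, memory_order_acquire);

	/* Any pending or tail bits observed: give up and queue instead. */
	if (val & ~Q_LOCKED_MASK)
		return false;

	/* Set pending and fetch the old value in one atomic step. */
	val = atomic_fetch_or_explicit(lock, Q_PENDING_VAL,
				       memory_order_acquire);
	if (val & ~Q_LOCKED_MASK) {
		/* Undo our pending bit only if nobody else owned it. */
		if (!(val & Q_PENDING_VAL))
			atomic_fetch_and_explicit(lock, ~Q_PENDING_VAL,
						  memory_order_relaxed);
		return false;
	}

	/* We own pending; wait for the current holder to drop the lock. */
	if (val & Q_LOCKED_MASK)
		while (atomic_load_explicit(lock, memory_order_acquire) &
		       Q_LOCKED_MASK)
			;

	/* Take ownership and clear pending, preserving any tail bits. */
	atomic_fetch_add_explicit(lock,
				  (uint32_t)(Q_LOCKED_VAL - Q_PENDING_VAL),
				  memory_order_relaxed);
	return true;
}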
+diff --git a/kernel/locking/qspinlock_paravirt.h b/kernel/locking/qspinlock_paravirt.h
+index e3b5520005db..af2a24d484aa 100644
+--- a/kernel/locking/qspinlock_paravirt.h
++++ b/kernel/locking/qspinlock_paravirt.h
+@@ -69,10 +69,8 @@ struct pv_node {
+ #define queued_spin_trylock(l)        pv_queued_spin_steal_lock(l)
+ static inline bool pv_queued_spin_steal_lock(struct qspinlock *lock)
+ {
+-      struct __qspinlock *l = (void *)lock;
+-
+       if (!(atomic_read(&lock->val) & _Q_LOCKED_PENDING_MASK) &&
+-          (cmpxchg(&l->locked, 0, _Q_LOCKED_VAL) == 0)) {
++          (cmpxchg(&lock->locked, 0, _Q_LOCKED_VAL) == 0)) {
+               qstat_inc(qstat_pv_lock_stealing, true);
+               return true;
+       }
+@@ -87,16 +85,7 @@ static inline bool pv_queued_spin_steal_lock(struct qspinlock *lock)
+ #if _Q_PENDING_BITS == 8
+ static __always_inline void set_pending(struct qspinlock *lock)
+ {
+-      struct __qspinlock *l = (void *)lock;
+-
+-      WRITE_ONCE(l->pending, 1);
+-}
+-
+-static __always_inline void clear_pending(struct qspinlock *lock)
+-{
+-      struct __qspinlock *l = (void *)lock;
+-
+-      WRITE_ONCE(l->pending, 0);
++      WRITE_ONCE(lock->pending, 1);
+ }
+ 
+ /*
+@@ -106,10 +95,8 @@ static __always_inline void clear_pending(struct qspinlock *lock)
+  */
+ static __always_inline int trylock_clear_pending(struct qspinlock *lock)
+ {
+-      struct __qspinlock *l = (void *)lock;
+-
+-      return !READ_ONCE(l->locked) &&
+-             (cmpxchg(&l->locked_pending, _Q_PENDING_VAL, _Q_LOCKED_VAL)
++      return !READ_ONCE(lock->locked) &&
++             (cmpxchg(&lock->locked_pending, _Q_PENDING_VAL, _Q_LOCKED_VAL)
+                       == _Q_PENDING_VAL);
+ }
+ #else /* _Q_PENDING_BITS == 8 */
+@@ -118,11 +105,6 @@ static __always_inline void set_pending(struct qspinlock *lock)
+       atomic_or(_Q_PENDING_VAL, &lock->val);
+ }
+ 
+-static __always_inline void clear_pending(struct qspinlock *lock)
+-{
+-      atomic_andnot(_Q_PENDING_VAL, &lock->val);
+-}
+-
+ static __always_inline int trylock_clear_pending(struct qspinlock *lock)
+ {
+       int val = atomic_read(&lock->val);
+@@ -353,7 +335,6 @@ static void pv_wait_node(struct mcs_spinlock *node, struct mcs_spinlock *prev)
+ static void pv_kick_node(struct qspinlock *lock, struct mcs_spinlock *node)
+ {
+       struct pv_node *pn = (struct pv_node *)node;
+-      struct __qspinlock *l = (void *)lock;
+ 
+       /*
+        * If the vCPU is indeed halted, advance its state to match that of
+@@ -372,7 +353,7 @@ static void pv_kick_node(struct qspinlock *lock, struct mcs_spinlock *node)
+        * the hash table later on at unlock time, no atomic instruction is
+        * needed.
+        */
+-      WRITE_ONCE(l->locked, _Q_SLOW_VAL);
++      WRITE_ONCE(lock->locked, _Q_SLOW_VAL);
+       (void)pv_hash(lock, pn);
+ }
+ 
+@@ -387,7 +368,6 @@ static u32
+ pv_wait_head_or_lock(struct qspinlock *lock, struct mcs_spinlock *node)
+ {
+       struct pv_node *pn = (struct pv_node *)node;
+-      struct __qspinlock *l = (void *)lock;
+       struct qspinlock **lp = NULL;
+       int waitcnt = 0;
+       int loop;
+@@ -438,13 +418,13 @@ pv_wait_head_or_lock(struct qspinlock *lock, struct mcs_spinlock *node)
+                        *
+                        * Matches the smp_rmb() in __pv_queued_spin_unlock().
+                        */
+-                      if (xchg(&l->locked, _Q_SLOW_VAL) == 0) {
++                      if (xchg(&lock->locked, _Q_SLOW_VAL) == 0) {
+                               /*
+                                * The lock was free and now we own the lock.
+                                * Change the lock value back to _Q_LOCKED_VAL
+                                * and unhash the table.
+                                */
+-                              WRITE_ONCE(l->locked, _Q_LOCKED_VAL);
++                              WRITE_ONCE(lock->locked, _Q_LOCKED_VAL);
+                               WRITE_ONCE(*lp, NULL);
+                               goto gotlock;
+                       }
+@@ -452,7 +432,7 @@ pv_wait_head_or_lock(struct qspinlock *lock, struct mcs_spinlock *node)
+               WRITE_ONCE(pn->state, vcpu_hashed);
+               qstat_inc(qstat_pv_wait_head, true);
+               qstat_inc(qstat_pv_wait_again, waitcnt);
+-              pv_wait(&l->locked, _Q_SLOW_VAL);
++              pv_wait(&lock->locked, _Q_SLOW_VAL);
+ 
+               /*
+                * Because of lock stealing, the queue head vCPU may not be
+@@ -477,7 +457,6 @@ gotlock:
+ __visible void
+ __pv_queued_spin_unlock_slowpath(struct qspinlock *lock, u8 locked)
+ {
+-      struct __qspinlock *l = (void *)lock;
+       struct pv_node *node;
+ 
+       if (unlikely(locked != _Q_SLOW_VAL)) {
+@@ -506,7 +485,7 @@ __pv_queued_spin_unlock_slowpath(struct qspinlock *lock, u8 locked)
+        * Now that we have a reference to the (likely) blocked pv_node,
+        * release the lock.
+        */
+-      smp_store_release(&l->locked, 0);
++      smp_store_release(&lock->locked, 0);
+ 
+       /*
+        * At this point the memory pointed at by lock can be freed/reused,
+@@ -532,7 +511,6 @@ __pv_queued_spin_unlock_slowpath(struct qspinlock *lock, u8 locked)
+ #ifndef __pv_queued_spin_unlock
+ __visible void __pv_queued_spin_unlock(struct qspinlock *lock)
+ {
+-      struct __qspinlock *l = (void *)lock;
+       u8 locked;
+ 
+       /*
+@@ -540,7 +518,7 @@ __visible void __pv_queued_spin_unlock(struct qspinlock *lock)
+        * unhash. Otherwise it would be possible to have multiple @lock
+        * entries, which would be BAD.
+        */
+-      locked = cmpxchg_release(&l->locked, _Q_LOCKED_VAL, 0);
++      locked = cmpxchg_release(&lock->locked, _Q_LOCKED_VAL, 0);
+       if (likely(locked == _Q_LOCKED_VAL))
+               return;
+ 
+diff --git a/kernel/signal.c b/kernel/signal.c
+index 424306163edc..049929a5f4ce 100644
+--- a/kernel/signal.c
++++ b/kernel/signal.c
+@@ -3116,7 +3116,8 @@ int do_sigaction(int sig, struct k_sigaction *act, struct k_sigaction *oact)
+ }
+ 
+ static int
+-do_sigaltstack (const stack_t __user *uss, stack_t __user *uoss, unsigned long sp)
++do_sigaltstack (const stack_t __user *uss, stack_t __user *uoss, unsigned long sp,
++              size_t min_ss_size)
+ {
+       stack_t oss;
+       int error;
+@@ -3155,9 +3156,8 @@ do_sigaltstack (const stack_t __user *uss, stack_t __user *uoss, unsigned long s
+                       ss_size = 0;
+                       ss_sp = NULL;
+               } else {
+-                      error = -ENOMEM;
+-                      if (ss_size < MINSIGSTKSZ)
+-                              goto out;
++                      if (unlikely(ss_size < min_ss_size))
++                              return -ENOMEM;
+               }
+ 
+               current->sas_ss_sp = (unsigned long) ss_sp;
+@@ -3180,12 +3180,14 @@ out:
+ }
+ SYSCALL_DEFINE2(sigaltstack,const stack_t __user *,uss, stack_t __user *,uoss)
+ {
+-      return do_sigaltstack(uss, uoss, current_user_stack_pointer());
++      return do_sigaltstack(uss, uoss, current_user_stack_pointer(),
++                            MINSIGSTKSZ);
+ }
+ 
+ int restore_altstack(const stack_t __user *uss)
+ {
+-      int err = do_sigaltstack(uss, NULL, current_user_stack_pointer());
++      int err = do_sigaltstack(uss, NULL, current_user_stack_pointer(),
++                                      MINSIGSTKSZ);
+       /* squash all but EFAULT for now */
+       return err == -EFAULT ? err : 0;
+ }
+@@ -3226,7 +3228,8 @@ COMPAT_SYSCALL_DEFINE2(sigaltstack,
+       set_fs(KERNEL_DS);
+       ret = do_sigaltstack((stack_t __force __user *) (uss_ptr ? &uss : NULL),
+                            (stack_t __force __user *) &uoss,
+-                           compat_user_stack_pointer());
++                           compat_user_stack_pointer(),
++                           COMPAT_MINSIGSTKSZ);
+       set_fs(seg);
+       if (ret >= 0 && uoss_ptr)  {
+               if (!access_ok(VERIFY_WRITE, uoss_ptr, sizeof(compat_stack_t)) ||
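With the min_ss_size argument threaded through do_sigaltstack(), the native syscall keeps enforcing MINSIGSTKSZ while the compat entry point enforces COMPAT_MINSIGSTKSZ, and an undersized alternate stack is rejected with -ENOMEM. A small user-space demonstration of that contract (illustrative only; it exercises the ordinary sigaltstack(2) interface):

#define _POSIX_C_SOURCE 200809L
#include <errno.h>
#include <signal.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

int main(void)
{
	stack_t ss;

	ss.ss_sp = malloc(SIGSTKSZ);
	if (!ss.ss_sp)
		return 1;
	ss.ss_flags = 0;

	ss.ss_size = MINSIGSTKSZ - 1;		/* too small: kernel returns ENOMEM */
	if (sigaltstack(&ss, NULL) == -1)
		printf("undersized stack rejected: %s\n", strerror(errno));

	ss.ss_size = SIGSTKSZ;			/* large enough: accepted */
	if (sigaltstack(&ss, NULL) == 0)
		printf("alternate stack of %zu bytes installed\n", (size_t)ss.ss_size);

	free(ss.ss_sp);				/* safe here: no handler ever ran on it */
	return 0;
}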
+diff --git a/kernel/time/timer_list.c b/kernel/time/timer_list.c
+index ef4f16e81283..1407ed20ea93 100644
+--- a/kernel/time/timer_list.c
++++ b/kernel/time/timer_list.c
+@@ -399,7 +399,7 @@ static int __init init_timer_list_procfs(void)
+ {
+       struct proc_dir_entry *pe;
+ 
+-      pe = proc_create("timer_list", 0444, NULL, &timer_list_fops);
++      pe = proc_create("timer_list", 0400, NULL, &timer_list_fops);
+       if (!pe)
+               return -ENOMEM;
+       return 0;
+diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
+index 2884fe01cb54..8f4227d4cd39 100644
+--- a/kernel/trace/ftrace.c
++++ b/kernel/trace/ftrace.c
+@@ -4836,6 +4836,7 @@ void ftrace_destroy_filter_files(struct ftrace_ops *ops)
+       if (ops->flags & FTRACE_OPS_FL_ENABLED)
+               ftrace_shutdown(ops, 0);
+       ops->flags |= FTRACE_OPS_FL_DELETED;
++      ftrace_free_filter(ops);
+       mutex_unlock(&ftrace_lock);
+ }
+ 
+diff --git a/kernel/trace/trace_events_trigger.c b/kernel/trace/trace_events_trigger.c
+index 8819944bbcbf..7e6971ba9541 100644
+--- a/kernel/trace/trace_events_trigger.c
++++ b/kernel/trace/trace_events_trigger.c
+@@ -742,8 +742,10 @@ int set_trigger_filter(char *filter_str,
+ 
+       /* The filter is for the 'trigger' event, not the triggered event */
+       ret = create_event_filter(file->event_call, filter_str, false, &filter);
+-      if (ret)
+-              goto out;
++      /*
++       * If create_event_filter() fails, filter still needs to be freed.
++       * Which the calling code will do with data->filter.
++       */
+  assign:
+       tmp = rcu_access_pointer(data->filter);
+ 
+diff --git a/lib/interval_tree_test.c b/lib/interval_tree_test.c
+index 245900b98c8e..222c8010bda0 100644
+--- a/lib/interval_tree_test.c
++++ b/lib/interval_tree_test.c
+@@ -1,27 +1,38 @@
+ #include <linux/module.h>
++#include <linux/moduleparam.h>
+ #include <linux/interval_tree.h>
+ #include <linux/random.h>
++#include <linux/slab.h>
+ #include <asm/timex.h>
+ 
+-#define NODES        100
+-#define PERF_LOOPS   100000
+-#define SEARCHES     100
+-#define SEARCH_LOOPS 10000
++#define __param(type, name, init, msg)                \
++      static type name = init;                \
++      module_param(name, type, 0444);         \
++      MODULE_PARM_DESC(name, msg);
++
++__param(int, nnodes, 100, "Number of nodes in the interval tree");
++__param(int, perf_loops, 1000, "Number of iterations modifying the tree");
++
++__param(int, nsearches, 100, "Number of searches to the interval tree");
++__param(int, search_loops, 1000, "Number of iterations searching the tree");
++__param(bool, search_all, false, "Searches will iterate all nodes in the tree");
++
++__param(uint, max_endpoint, ~0, "Largest value for the interval's endpoint");
+ 
+ static struct rb_root root = RB_ROOT;
+-static struct interval_tree_node nodes[NODES];
+-static u32 queries[SEARCHES];
++static struct interval_tree_node *nodes = NULL;
++static u32 *queries = NULL;
+ 
+ static struct rnd_state rnd;
+ 
+ static inline unsigned long
+-search(unsigned long query, struct rb_root *root)
++search(struct rb_root *root, unsigned long start, unsigned long last)
+ {
+       struct interval_tree_node *node;
+       unsigned long results = 0;
+ 
+-      for (node = interval_tree_iter_first(root, query, query); node;
+-           node = interval_tree_iter_next(node, query, query))
++      for (node = interval_tree_iter_first(root, start, last); node;
++           node = interval_tree_iter_next(node, start, last))
+               results++;
+       return results;
+ }
+@@ -29,19 +40,22 @@ search(unsigned long query, struct rb_root *root)
+ static void init(void)
+ {
+       int i;
+-      for (i = 0; i < NODES; i++) {
+-              u32 a = prandom_u32_state(&rnd);
+-              u32 b = prandom_u32_state(&rnd);
+-              if (a <= b) {
+-                      nodes[i].start = a;
+-                      nodes[i].last = b;
+-              } else {
+-                      nodes[i].start = b;
+-                      nodes[i].last = a;
+-              }
++
++      for (i = 0; i < nnodes; i++) {
++              u32 b = (prandom_u32_state(&rnd) >> 4) % max_endpoint;
++              u32 a = (prandom_u32_state(&rnd) >> 4) % b;
++
++              nodes[i].start = a;
++              nodes[i].last = b;
+       }
+-      for (i = 0; i < SEARCHES; i++)
+-              queries[i] = prandom_u32_state(&rnd);
++
++      /*
++       * Limit the search scope to what the user defined.
++       * Otherwise we are merely measuring empty walks,
++       * which is pointless.
++       */
++      for (i = 0; i < nsearches; i++)
++              queries[i] = (prandom_u32_state(&rnd) >> 4) % max_endpoint;
+ }
+ 
+ static int interval_tree_test_init(void)
+@@ -50,6 +64,16 @@ static int interval_tree_test_init(void)
+       unsigned long results;
+       cycles_t time1, time2, time;
+ 
++      nodes = kmalloc(nnodes * sizeof(struct interval_tree_node), GFP_KERNEL);
++      if (!nodes)
++              return -ENOMEM;
++
++      queries = kmalloc(nsearches * sizeof(int), GFP_KERNEL);
++      if (!queries) {
++              kfree(nodes);
++              return -ENOMEM;
++      }
++
+       printk(KERN_ALERT "interval tree insert/remove");
+ 
+       prandom_seed_state(&rnd, 3141592653589793238ULL);
+@@ -57,39 +81,46 @@ static int interval_tree_test_init(void)
+ 
+       time1 = get_cycles();
+ 
+-      for (i = 0; i < PERF_LOOPS; i++) {
+-              for (j = 0; j < NODES; j++)
++      for (i = 0; i < perf_loops; i++) {
++              for (j = 0; j < nnodes; j++)
+                       interval_tree_insert(nodes + j, &root);
+-              for (j = 0; j < NODES; j++)
++              for (j = 0; j < nnodes; j++)
+                       interval_tree_remove(nodes + j, &root);
+       }
+ 
+       time2 = get_cycles();
+       time = time2 - time1;
+ 
+-      time = div_u64(time, PERF_LOOPS);
++      time = div_u64(time, perf_loops);
+       printk(" -> %llu cycles\n", (unsigned long long)time);
+ 
+       printk(KERN_ALERT "interval tree search");
+ 
+-      for (j = 0; j < NODES; j++)
++      for (j = 0; j < nnodes; j++)
+               interval_tree_insert(nodes + j, &root);
+ 
+       time1 = get_cycles();
+ 
+       results = 0;
+-      for (i = 0; i < SEARCH_LOOPS; i++)
+-              for (j = 0; j < SEARCHES; j++)
+-                      results += search(queries[j], &root);
++      for (i = 0; i < search_loops; i++)
++              for (j = 0; j < nsearches; j++) {
++                      unsigned long start = search_all ? 0 : queries[j];
++                      unsigned long last = search_all ? max_endpoint : queries[j];
++
++                      results += search(&root, start, last);
++              }
+ 
+       time2 = get_cycles();
+       time = time2 - time1;
+ 
+-      time = div_u64(time, SEARCH_LOOPS);
+-      results = div_u64(results, SEARCH_LOOPS);
++      time = div_u64(time, search_loops);
++      results = div_u64(results, search_loops);
+       printk(" -> %llu cycles (%lu results)\n",
+              (unsigned long long)time, results);
+ 
++      kfree(queries);
++      kfree(nodes);
++
+       return -EAGAIN; /* Fail will directly unload the module */
+ }
+ 
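The test-module rework above turns the old compile-time NODES/PERF_LOOPS constants into module parameters and allocates the node and query arrays at load time. A hedged, stand-alone example module showing the same module_param() pattern that the __param() helper wraps (the module name and the allocation are assumptions for illustration, not part of the patch):

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/slab.h>

static int nnodes = 100;
module_param(nnodes, int, 0444);
MODULE_PARM_DESC(nnodes, "Number of nodes to allocate for the example");

static int __init param_demo_init(void)
{
	u64 *buf = kmalloc_array(nnodes, sizeof(*buf), GFP_KERNEL);

	if (!buf)
		return -ENOMEM;
	pr_info("param_demo: room for %d nodes allocated\n", nnodes);
	kfree(buf);
	return 0;
}

static void __exit param_demo_exit(void)
{
}

module_init(param_demo_init);
module_exit(param_demo_exit);
MODULE_LICENSE("GPL");

Loaded with, for example, "insmod param_demo.ko nnodes=1000", the value is also visible read-only under /sys/module/param_demo/parameters/ because of the 0444 mode, which is the same convention the patched test modules use.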
+diff --git a/lib/rbtree_test.c b/lib/rbtree_test.c
+index 8b3c9dc88262..afedd3770562 100644
+--- a/lib/rbtree_test.c
++++ b/lib/rbtree_test.c
+@@ -1,11 +1,18 @@
+ #include <linux/module.h>
++#include <linux/moduleparam.h>
+ #include <linux/rbtree_augmented.h>
+ #include <linux/random.h>
++#include <linux/slab.h>
+ #include <asm/timex.h>
+ 
+-#define NODES       100
+-#define PERF_LOOPS  100000
+-#define CHECK_LOOPS 100
++#define __param(type, name, init, msg)                \
++      static type name = init;                \
++      module_param(name, type, 0444);         \
++      MODULE_PARM_DESC(name, msg);
++
++__param(int, nnodes, 100, "Number of nodes in the rb-tree");
++__param(int, perf_loops, 1000, "Number of iterations modifying the rb-tree");
++__param(int, check_loops, 100, "Number of iterations modifying and verifying the rb-tree");
+ 
+ struct test_node {
+       u32 key;
+@@ -17,7 +24,7 @@ struct test_node {
+ };
+ 
+ static struct rb_root root = RB_ROOT;
+-static struct test_node nodes[NODES];
++static struct test_node *nodes = NULL;
+ 
+ static struct rnd_state rnd;
+ 
+@@ -95,7 +102,7 @@ static void erase_augmented(struct test_node *node, struct rb_root *root)
+ static void init(void)
+ {
+       int i;
+-      for (i = 0; i < NODES; i++) {
++      for (i = 0; i < nnodes; i++) {
+               nodes[i].key = prandom_u32_state(&rnd);
+               nodes[i].val = prandom_u32_state(&rnd);
+       }
+@@ -177,6 +184,10 @@ static int __init rbtree_test_init(void)
+       int i, j;
+       cycles_t time1, time2, time;
+ 
++      nodes = kmalloc(nnodes * sizeof(*nodes), GFP_KERNEL);
++      if (!nodes)
++              return -ENOMEM;
++
+       printk(KERN_ALERT "rbtree testing");
+ 
+       prandom_seed_state(&rnd, 3141592653589793238ULL);
+@@ -184,27 +195,27 @@ static int __init rbtree_test_init(void)
+ 
+       time1 = get_cycles();
+ 
+-      for (i = 0; i < PERF_LOOPS; i++) {
+-              for (j = 0; j < NODES; j++)
++      for (i = 0; i < perf_loops; i++) {
++              for (j = 0; j < nnodes; j++)
+                       insert(nodes + j, &root);
+-              for (j = 0; j < NODES; j++)
++              for (j = 0; j < nnodes; j++)
+                       erase(nodes + j, &root);
+       }
+ 
+       time2 = get_cycles();
+       time = time2 - time1;
+ 
+-      time = div_u64(time, PERF_LOOPS);
++      time = div_u64(time, perf_loops);
+       printk(" -> %llu cycles\n", (unsigned long long)time);
+ 
+-      for (i = 0; i < CHECK_LOOPS; i++) {
++      for (i = 0; i < check_loops; i++) {
+               init();
+-              for (j = 0; j < NODES; j++) {
++              for (j = 0; j < nnodes; j++) {
+                       check(j);
+                       insert(nodes + j, &root);
+               }
+-              for (j = 0; j < NODES; j++) {
+-                      check(NODES - j);
++              for (j = 0; j < nnodes; j++) {
++                      check(nnodes - j);
+                       erase(nodes + j, &root);
+               }
+               check(0);
+@@ -216,32 +227,34 @@ static int __init rbtree_test_init(void)
+ 
+       time1 = get_cycles();
+ 
+-      for (i = 0; i < PERF_LOOPS; i++) {
+-              for (j = 0; j < NODES; j++)
++      for (i = 0; i < perf_loops; i++) {
++              for (j = 0; j < nnodes; j++)
+                       insert_augmented(nodes + j, &root);
+-              for (j = 0; j < NODES; j++)
++              for (j = 0; j < nnodes; j++)
+                       erase_augmented(nodes + j, &root);
+       }
+ 
+       time2 = get_cycles();
+       time = time2 - time1;
+ 
+-      time = div_u64(time, PERF_LOOPS);
++      time = div_u64(time, perf_loops);
+       printk(" -> %llu cycles\n", (unsigned long long)time);
+ 
+-      for (i = 0; i < CHECK_LOOPS; i++) {
++      for (i = 0; i < check_loops; i++) {
+               init();
+-              for (j = 0; j < NODES; j++) {
++              for (j = 0; j < nnodes; j++) {
+                       check_augmented(j);
+                       insert_augmented(nodes + j, &root);
+               }
+-              for (j = 0; j < NODES; j++) {
+-                      check_augmented(NODES - j);
++              for (j = 0; j < nnodes; j++) {
++                      check_augmented(nnodes - j);
+                       erase_augmented(nodes + j, &root);
+               }
+               check_augmented(0);
+       }
+ 
++      kfree(nodes);
++
+       return -EAGAIN; /* Fail will directly unload the module */
+ }
+ 
+diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
+index 39451c84c785..6e0aa296f134 100644
+--- a/net/mac80211/mlme.c
++++ b/net/mac80211/mlme.c
+@@ -1867,7 +1867,8 @@ static bool ieee80211_sta_wmm_params(struct ieee80211_local *local,
+               params[ac].acm = acm;
+               params[ac].uapsd = uapsd;
+ 
+-              if (params[ac].cw_min > params[ac].cw_max) {
++              if (params[ac].cw_min == 0 ||
++                  params[ac].cw_min > params[ac].cw_max) {
+                       sdata_info(sdata,
+                                  "AP has invalid WMM params (CWmin/max=%d/%d for ACI %d), using defaults\n",
+                                  params[ac].cw_min, params[ac].cw_max, aci);
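The mac80211 change above additionally rejects a CWmin of zero, since a zero contention window is not a valid WMM parameter and previously slipped past the CWmin > CWmax check. A tiny hedged sketch of the resulting validation rule (stand-alone helper, not the kernel function):

#include <stdbool.h>
#include <stdint.h>

/* Valid WMM contention window: CWmin must be non-zero and no larger
 * than CWmax; anything else falls back to the driver defaults. */
static bool wmm_cw_valid(uint16_t cw_min, uint16_t cw_max)
{
	return cw_min != 0 && cw_min <= cw_max;
}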
+diff --git a/net/sunrpc/xprt.c b/net/sunrpc/xprt.c
+index 685e6d225414..1a8df242d26a 100644
+--- a/net/sunrpc/xprt.c
++++ b/net/sunrpc/xprt.c
+@@ -778,8 +778,15 @@ void xprt_connect(struct rpc_task *task)
+                       return;
+               if (xprt_test_and_set_connecting(xprt))
+                       return;
+-              xprt->stat.connect_start = jiffies;
+-              xprt->ops->connect(xprt, task);
++              /* Race breaker */
++              if (!xprt_connected(xprt)) {
++                      xprt->stat.connect_start = jiffies;
++                      xprt->ops->connect(xprt, task);
++              } else {
++                      xprt_clear_connecting(xprt);
++                      task->tk_status = 0;
++                      rpc_wake_up_queued_task(&xprt->pending, task);
++              }
+       }
+       xprt_release_write(xprt, task);
+ }
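The sunrpc hunk above adds a race breaker: after winning the connecting flag, xprt_connect() re-checks whether the transport already connected in the meantime and, if so, backs out and wakes the waiting task instead of starting a redundant connect. A hedged user-space sketch of that pattern with C11 atomics (the types and helper functions are assumptions for illustration, not the sunrpc API):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct transport {
	atomic_bool connected;
	atomic_bool connecting;
};

static void start_connect(struct transport *t)
{
	(void)t;
	puts("starting a new connection attempt");
}

static void wake_waiter(struct transport *t)
{
	(void)t;
	puts("already connected: clearing flag and waking the waiter");
}

static void connect_once(struct transport *t)
{
	if (atomic_load(&t->connected))
		return;
	if (atomic_exchange(&t->connecting, true))
		return;			/* someone else is already connecting */

	/* Race breaker: the connection may have completed between the
	 * first check and winning the connecting flag. */
	if (!atomic_load(&t->connected)) {
		start_connect(t);
	} else {
		atomic_store(&t->connecting, false);
		wake_waiter(t);
	}
}

int main(void)
{
	struct transport t = { .connected = false, .connecting = false };

	connect_once(&t);		/* first caller starts the connect */
	atomic_store(&t.connected, true);
	connect_once(&t);		/* later callers see it connected */
	return 0;
}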
