commit:     511a2d39861de3acb2bf076ea7fd141c74cfad0b
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Tue Apr 18 10:22:59 2017 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Tue Apr 18 10:22:59 2017 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=511a2d39

Linux patch 4.9.23

 0000_README             |    4 +
 1022_linux-4.9.23.patch | 1236 +++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 1240 insertions(+)

diff --git a/0000_README b/0000_README
index 9eac63e..bc4b2a4 100644
--- a/0000_README
+++ b/0000_README
@@ -131,6 +131,10 @@ Patch:  1021_linux-4.9.22.patch
 From:   http://www.kernel.org
 Desc:   Linux 4.9.22
 
+Patch:  1022_linux-4.9.23.patch
+From:   http://www.kernel.org
+Desc:   Linux 4.9.23
+
 Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.

diff --git a/1022_linux-4.9.23.patch b/1022_linux-4.9.23.patch
new file mode 100644
index 0000000..05a3313
--- /dev/null
+++ b/1022_linux-4.9.23.patch
@@ -0,0 +1,1236 @@
+diff --git a/Makefile b/Makefile
+index 4bf4648d97db..0de75976cad5 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,6 +1,6 @@
+ VERSION = 4
+ PATCHLEVEL = 9
+-SUBLEVEL = 22
++SUBLEVEL = 23
+ EXTRAVERSION =
+ NAME = Roaring Lionus
+ 
+diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
+index 9a6e11b6f457..5a4f2eb9d0d5 100644
+--- a/arch/mips/Kconfig
++++ b/arch/mips/Kconfig
+@@ -9,6 +9,7 @@ config MIPS
+       select HAVE_CONTEXT_TRACKING
+       select HAVE_GENERIC_DMA_COHERENT
+       select HAVE_IDE
++      select HAVE_IRQ_EXIT_ON_IRQ_STACK
+       select HAVE_OPROFILE
+       select HAVE_PERF_EVENTS
+       select PERF_USE_VMALLOC
+diff --git a/arch/mips/include/asm/irq.h b/arch/mips/include/asm/irq.h
+index 6bf10e796553..956db6e201d1 100644
+--- a/arch/mips/include/asm/irq.h
++++ b/arch/mips/include/asm/irq.h
+@@ -17,6 +17,18 @@
+ 
+ #include <irq.h>
+ 
++#define IRQ_STACK_SIZE                        THREAD_SIZE
++
++extern void *irq_stack[NR_CPUS];
++
++static inline bool on_irq_stack(int cpu, unsigned long sp)
++{
++      unsigned long low = (unsigned long)irq_stack[cpu];
++      unsigned long high = low + IRQ_STACK_SIZE;
++
++      return (low <= sp && sp <= high);
++}
++
+ #ifdef CONFIG_I8259
+ static inline int irq_canonicalize(int irq)
+ {
+diff --git a/arch/mips/include/asm/stackframe.h b/arch/mips/include/asm/stackframe.h
+index eebf39549606..2f182bdf024f 100644
+--- a/arch/mips/include/asm/stackframe.h
++++ b/arch/mips/include/asm/stackframe.h
+@@ -216,12 +216,19 @@
+               LONG_S  $25, PT_R25(sp)
+               LONG_S  $28, PT_R28(sp)
+               LONG_S  $31, PT_R31(sp)
++
++              /* Set thread_info if we're coming from user mode */
++              mfc0    k0, CP0_STATUS
++              sll     k0, 3           /* extract cu0 bit */
++              bltz    k0, 9f
++
+               ori     $28, sp, _THREAD_MASK
+               xori    $28, _THREAD_MASK
+ #ifdef CONFIG_CPU_CAVIUM_OCTEON
+               .set    mips64
+               pref    0, 0($28)       /* Prefetch the current pointer */
+ #endif
++9:
+               .set    pop
+               .endm
+ 
+diff --git a/arch/mips/kernel/asm-offsets.c b/arch/mips/kernel/asm-offsets.c
+index fae2f9447792..4be2763f835d 100644
+--- a/arch/mips/kernel/asm-offsets.c
++++ b/arch/mips/kernel/asm-offsets.c
+@@ -102,6 +102,7 @@ void output_thread_info_defines(void)
+       OFFSET(TI_REGS, thread_info, regs);
+       DEFINE(_THREAD_SIZE, THREAD_SIZE);
+       DEFINE(_THREAD_MASK, THREAD_MASK);
++      DEFINE(_IRQ_STACK_SIZE, IRQ_STACK_SIZE);
+       BLANK();
+ }
+ 
+diff --git a/arch/mips/kernel/genex.S b/arch/mips/kernel/genex.S
+index 52a4fdfc8513..2ac6c2625c13 100644
+--- a/arch/mips/kernel/genex.S
++++ b/arch/mips/kernel/genex.S
+@@ -187,9 +187,44 @@ NESTED(handle_int, PT_SIZE, sp)
+ 
+       LONG_L  s0, TI_REGS($28)
+       LONG_S  sp, TI_REGS($28)
+-      PTR_LA  ra, ret_from_irq
+-      PTR_LA  v0, plat_irq_dispatch
+-      jr      v0
++
++      /*
++       * SAVE_ALL ensures we are using a valid kernel stack for the thread.
++       * Check if we are already using the IRQ stack.
++       */
++      move    s1, sp # Preserve the sp
++
++      /* Get IRQ stack for this CPU */
++      ASM_CPUID_MFC0  k0, ASM_SMP_CPUID_REG
++#if defined(CONFIG_32BIT) || defined(KBUILD_64BIT_SYM32)
++      lui     k1, %hi(irq_stack)
++#else
++      lui     k1, %highest(irq_stack)
++      daddiu  k1, %higher(irq_stack)
++      dsll    k1, 16
++      daddiu  k1, %hi(irq_stack)
++      dsll    k1, 16
++#endif
++      LONG_SRL        k0, SMP_CPUID_PTRSHIFT
++      LONG_ADDU       k1, k0
++      LONG_L  t0, %lo(irq_stack)(k1)
++
++      # Check if already on IRQ stack
++      PTR_LI  t1, ~(_THREAD_SIZE-1)
++      and     t1, t1, sp
++      beq     t0, t1, 2f
++
++      /* Switch to IRQ stack */
++      li      t1, _IRQ_STACK_SIZE
++      PTR_ADD sp, t0, t1
++
++2:
++      jal     plat_irq_dispatch
++
++      /* Restore sp */
++      move    sp, s1
++
++      j       ret_from_irq
+ #ifdef CONFIG_CPU_MICROMIPS
+       nop
+ #endif
+@@ -262,8 +297,44 @@ NESTED(except_vec_vi_handler, 0, sp)
+ 
+       LONG_L  s0, TI_REGS($28)
+       LONG_S  sp, TI_REGS($28)
+-      PTR_LA  ra, ret_from_irq
+-      jr      v0
++
++      /*
++       * SAVE_ALL ensures we are using a valid kernel stack for the thread.
++       * Check if we are already using the IRQ stack.
++       */
++      move    s1, sp # Preserve the sp
++
++      /* Get IRQ stack for this CPU */
++      ASM_CPUID_MFC0  k0, ASM_SMP_CPUID_REG
++#if defined(CONFIG_32BIT) || defined(KBUILD_64BIT_SYM32)
++      lui     k1, %hi(irq_stack)
++#else
++      lui     k1, %highest(irq_stack)
++      daddiu  k1, %higher(irq_stack)
++      dsll    k1, 16
++      daddiu  k1, %hi(irq_stack)
++      dsll    k1, 16
++#endif
++      LONG_SRL        k0, SMP_CPUID_PTRSHIFT
++      LONG_ADDU       k1, k0
++      LONG_L  t0, %lo(irq_stack)(k1)
++
++      # Check if already on IRQ stack
++      PTR_LI  t1, ~(_THREAD_SIZE-1)
++      and     t1, t1, sp
++      beq     t0, t1, 2f
++
++      /* Switch to IRQ stack */
++      li      t1, _IRQ_STACK_SIZE
++      PTR_ADD sp, t0, t1
++
++2:
++      jalr    v0
++
++      /* Restore sp */
++      move    sp, s1
++
++      j       ret_from_irq
+       END(except_vec_vi_handler)
+ 
+ /*
+diff --git a/arch/mips/kernel/irq.c b/arch/mips/kernel/irq.c
+index f25f7eab7307..2b0a371b42af 100644
+--- a/arch/mips/kernel/irq.c
++++ b/arch/mips/kernel/irq.c
+@@ -25,6 +25,8 @@
+ #include <linux/atomic.h>
+ #include <asm/uaccess.h>
+ 
++void *irq_stack[NR_CPUS];
++
+ /*
+  * 'what should we do if we get a hw irq event on an illegal vector'.
+  * each architecture has to answer this themselves.
+@@ -58,6 +60,15 @@ void __init init_IRQ(void)
+               clear_c0_status(ST0_IM);
+ 
+       arch_init_irq();
++
++      for_each_possible_cpu(i) {
++              int irq_pages = IRQ_STACK_SIZE / PAGE_SIZE;
++              void *s = (void *)__get_free_pages(GFP_KERNEL, irq_pages);
++
++              irq_stack[i] = s;
++              pr_debug("CPU%d IRQ stack at 0x%p - 0x%p\n", i,
++                      irq_stack[i], irq_stack[i] + IRQ_STACK_SIZE);
++      }
+ }
+ 
+ #ifdef CONFIG_DEBUG_STACKOVERFLOW
+diff --git a/arch/mips/kernel/process.c b/arch/mips/kernel/process.c
+index 1652f36acad1..fbbf5fcc695a 100644
+--- a/arch/mips/kernel/process.c
++++ b/arch/mips/kernel/process.c
+@@ -33,6 +33,7 @@
+ #include <asm/dsemul.h>
+ #include <asm/dsp.h>
+ #include <asm/fpu.h>
++#include <asm/irq.h>
+ #include <asm/msa.h>
+ #include <asm/pgtable.h>
+ #include <asm/mipsregs.h>
+@@ -556,7 +557,19 @@ EXPORT_SYMBOL(unwind_stack_by_address);
+ unsigned long unwind_stack(struct task_struct *task, unsigned long *sp,
+                          unsigned long pc, unsigned long *ra)
+ {
+-      unsigned long stack_page = (unsigned long)task_stack_page(task);
++      unsigned long stack_page = 0;
++      int cpu;
++
++      for_each_possible_cpu(cpu) {
++              if (on_irq_stack(cpu, *sp)) {
++                      stack_page = (unsigned long)irq_stack[cpu];
++                      break;
++              }
++      }
++
++      if (!stack_page)
++              stack_page = (unsigned long)task_stack_page(task);
++
+       return unwind_stack_by_address(stack_page, sp, pc, ra);
+ }
+ #endif
+diff --git a/block/blk-mq.c b/block/blk-mq.c
+index ee54ad01f7ac..7b597ec4e9c5 100644
+--- a/block/blk-mq.c
++++ b/block/blk-mq.c
+@@ -1474,7 +1474,7 @@ static struct blk_mq_tags *blk_mq_init_rq_map(struct blk_mq_tag_set *set,
+       INIT_LIST_HEAD(&tags->page_list);
+ 
+       tags->rqs = kzalloc_node(set->queue_depth * sizeof(struct request *),
+-                               GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY,
++                               GFP_NOIO | __GFP_NOWARN | __GFP_NORETRY,
+                                set->numa_node);
+       if (!tags->rqs) {
+               blk_mq_free_tags(tags);
+@@ -1500,7 +1500,7 @@ static struct blk_mq_tags *blk_mq_init_rq_map(struct blk_mq_tag_set *set,
+ 
+               do {
+                       page = alloc_pages_node(set->numa_node,
+-                              GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY | __GFP_ZERO,
++                              GFP_NOIO | __GFP_NOWARN | __GFP_NORETRY | __GFP_ZERO,
+                               this_order);
+                       if (page)
+                               break;
+@@ -1521,7 +1521,7 @@ static struct blk_mq_tags *blk_mq_init_rq_map(struct blk_mq_tag_set *set,
+                * Allow kmemleak to scan these pages as they contain pointers
+                * to additional allocations like via ops->init_request().
+                */
+-              kmemleak_alloc(p, order_to_size(this_order), 1, GFP_KERNEL);
++              kmemleak_alloc(p, order_to_size(this_order), 1, GFP_NOIO);
+               entries_per_page = order_to_size(this_order) / rq_size;
+               to_do = min(entries_per_page, set->queue_depth - i);
+               left -= to_do * rq_size;
+diff --git a/drivers/crypto/caam/caampkc.c b/drivers/crypto/caam/caampkc.c
+index 851015e652b8..354a16ab5a16 100644
+--- a/drivers/crypto/caam/caampkc.c
++++ b/drivers/crypto/caam/caampkc.c
+@@ -506,7 +506,7 @@ static int caam_rsa_init_tfm(struct crypto_akcipher *tfm)
+       ctx->dev = caam_jr_alloc();
+ 
+       if (IS_ERR(ctx->dev)) {
+-              dev_err(ctx->dev, "Job Ring Device allocation for transform failed\n");
++              pr_err("Job Ring Device allocation for transform failed\n");
+               return PTR_ERR(ctx->dev);
+       }
+ 
+diff --git a/drivers/crypto/caam/ctrl.c b/drivers/crypto/caam/ctrl.c
+index e483b78c6343..98468b96c32f 100644
+--- a/drivers/crypto/caam/ctrl.c
++++ b/drivers/crypto/caam/ctrl.c
+@@ -282,7 +282,8 @@ static int deinstantiate_rng(struct device *ctrldev, int state_handle_mask)
+                       /* Try to run it through DECO0 */
+                       ret = run_descriptor_deco0(ctrldev, desc, &status);
+ 
+-                      if (ret || status) {
++                      if (ret ||
++                          (status && status != JRSTA_SSRC_JUMP_HALT_CC)) {
+                               dev_err(ctrldev,
+                                       "Failed to deinstantiate RNG4 SH%d\n",
+                                       sh_idx);
+diff --git a/drivers/dma-buf/dma-buf.c b/drivers/dma-buf/dma-buf.c
+index cf04d249a6a4..6b54e02da10c 100644
+--- a/drivers/dma-buf/dma-buf.c
++++ b/drivers/dma-buf/dma-buf.c
+@@ -303,6 +303,9 @@ static const struct file_operations dma_buf_fops = {
+       .llseek         = dma_buf_llseek,
+       .poll           = dma_buf_poll,
+       .unlocked_ioctl = dma_buf_ioctl,
++#ifdef CONFIG_COMPAT
++      .compat_ioctl   = dma_buf_ioctl,
++#endif
+ };
+ 
+ /*
+diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
+index 670beebc32f6..923150de46cb 100644
+--- a/drivers/gpu/drm/i915/i915_drv.c
++++ b/drivers/gpu/drm/i915/i915_drv.c
+@@ -240,6 +240,7 @@ static int i915_getparam(struct drm_device *dev, void *data,
+       case I915_PARAM_IRQ_ACTIVE:
+       case I915_PARAM_ALLOW_BATCHBUFFER:
+       case I915_PARAM_LAST_DISPATCH:
++      case I915_PARAM_HAS_EXEC_CONSTANTS:
+               /* Reject all old ums/dri params. */
+               return -ENODEV;
+       case I915_PARAM_CHIPSET_ID:
+@@ -266,9 +267,6 @@ static int i915_getparam(struct drm_device *dev, void *data,
+       case I915_PARAM_HAS_BSD2:
+               value = intel_engine_initialized(&dev_priv->engine[VCS2]);
+               break;
+-      case I915_PARAM_HAS_EXEC_CONSTANTS:
+-              value = INTEL_GEN(dev_priv) >= 4;
+-              break;
+       case I915_PARAM_HAS_LLC:
+               value = HAS_LLC(dev_priv);
+               break;
+diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
+index da832d3cdca7..e0d72457b23c 100644
+--- a/drivers/gpu/drm/i915/i915_drv.h
++++ b/drivers/gpu/drm/i915/i915_drv.h
+@@ -1225,7 +1225,7 @@ struct intel_gen6_power_mgmt {
+       unsigned boosts;
+ 
+       /* manual wa residency calculations */
+-      struct intel_rps_ei up_ei, down_ei;
++      struct intel_rps_ei ei;
+ 
+       /*
+        * Protects RPS/RC6 register access and PCU communication.
+@@ -1751,8 +1751,6 @@ struct drm_i915_private {
+ 
+       const struct intel_device_info info;
+ 
+-      int relative_constants_mode;
+-
+       void __iomem *regs;
+ 
+       struct intel_uncore uncore;
+diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
+index 00eb4814b913..7b2030925825 100644
+--- a/drivers/gpu/drm/i915/i915_gem.c
++++ b/drivers/gpu/drm/i915/i915_gem.c
+@@ -4587,8 +4587,6 @@ i915_gem_load_init(struct drm_device *dev)
+       init_waitqueue_head(&dev_priv->gpu_error.wait_queue);
+       init_waitqueue_head(&dev_priv->gpu_error.reset_queue);
+ 
+-      dev_priv->relative_constants_mode = I915_EXEC_CONSTANTS_REL_GENERAL;
+-
+       init_waitqueue_head(&dev_priv->pending_flip_queue);
+ 
+       dev_priv->mm.interruptible = true;
+diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+index 0c400f852a76..2117f172d7a2 100644
+--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
++++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+@@ -1454,10 +1454,7 @@ execbuf_submit(struct i915_execbuffer_params *params,
+              struct drm_i915_gem_execbuffer2 *args,
+              struct list_head *vmas)
+ {
+-      struct drm_i915_private *dev_priv = params->request->i915;
+       u64 exec_start, exec_len;
+-      int instp_mode;
+-      u32 instp_mask;
+       int ret;
+ 
+       ret = i915_gem_execbuffer_move_to_gpu(params->request, vmas);
+@@ -1468,56 +1465,11 @@ execbuf_submit(struct i915_execbuffer_params *params,
+       if (ret)
+               return ret;
+ 
+-      instp_mode = args->flags & I915_EXEC_CONSTANTS_MASK;
+-      instp_mask = I915_EXEC_CONSTANTS_MASK;
+-      switch (instp_mode) {
+-      case I915_EXEC_CONSTANTS_REL_GENERAL:
+-      case I915_EXEC_CONSTANTS_ABSOLUTE:
+-      case I915_EXEC_CONSTANTS_REL_SURFACE:
+-              if (instp_mode != 0 && params->engine->id != RCS) {
+-                      DRM_DEBUG("non-0 rel constants mode on non-RCS\n");
+-                      return -EINVAL;
+-              }
+-
+-              if (instp_mode != dev_priv->relative_constants_mode) {
+-                      if (INTEL_INFO(dev_priv)->gen < 4) {
+-                              DRM_DEBUG("no rel constants on pre-gen4\n");
+-                              return -EINVAL;
+-                      }
+-
+-                      if (INTEL_INFO(dev_priv)->gen > 5 &&
+-                          instp_mode == I915_EXEC_CONSTANTS_REL_SURFACE) {
+-                              DRM_DEBUG("rel surface constants mode invalid on gen5+\n");
+-                              return -EINVAL;
+-                      }
+-
+-                      /* The HW changed the meaning on this bit on gen6 */
+-                      if (INTEL_INFO(dev_priv)->gen >= 6)
+-                              instp_mask &= ~I915_EXEC_CONSTANTS_REL_SURFACE;
+-              }
+-              break;
+-      default:
+-              DRM_DEBUG("execbuf with unknown constants: %d\n", instp_mode);
++      if (args->flags & I915_EXEC_CONSTANTS_MASK) {
++              DRM_DEBUG("I915_EXEC_CONSTANTS_* unsupported\n");
+               return -EINVAL;
+       }
+ 
+-      if (params->engine->id == RCS &&
+-          instp_mode != dev_priv->relative_constants_mode) {
+-              struct intel_ring *ring = params->request->ring;
+-
+-              ret = intel_ring_begin(params->request, 4);
+-              if (ret)
+-                      return ret;
+-
+-              intel_ring_emit(ring, MI_NOOP);
+-              intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
+-              intel_ring_emit_reg(ring, INSTPM);
+-              intel_ring_emit(ring, instp_mask << 16 | instp_mode);
+-              intel_ring_advance(ring);
+-
+-              dev_priv->relative_constants_mode = instp_mode;
+-      }
+-
+       if (args->flags & I915_EXEC_GEN7_SOL_RESET) {
+               ret = i915_reset_gen7_sol_offsets(params->request);
+               if (ret)
+diff --git a/drivers/gpu/drm/i915/i915_gem_shrinker.c b/drivers/gpu/drm/i915/i915_gem_shrinker.c
+index 1c237d02f30b..755d78832a66 100644
+--- a/drivers/gpu/drm/i915/i915_gem_shrinker.c
++++ b/drivers/gpu/drm/i915/i915_gem_shrinker.c
+@@ -233,7 +233,7 @@ unsigned long i915_gem_shrink_all(struct drm_i915_private *dev_priv)
+                               I915_SHRINK_BOUND |
+                               I915_SHRINK_UNBOUND |
+                               I915_SHRINK_ACTIVE);
+-      rcu_barrier(); /* wait until our RCU delayed slab frees are completed */
++      synchronize_rcu(); /* wait for our earlier RCU delayed slab frees */
+ 
+       return freed;
+ }
+diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
+index 3fc286cd1157..02908e37c228 100644
+--- a/drivers/gpu/drm/i915/i915_irq.c
++++ b/drivers/gpu/drm/i915/i915_irq.c
+@@ -990,68 +990,51 @@ static void vlv_c0_read(struct drm_i915_private *dev_priv,
+       ei->media_c0 = I915_READ(VLV_MEDIA_C0_COUNT);
+ }
+ 
+-static bool vlv_c0_above(struct drm_i915_private *dev_priv,
+-                       const struct intel_rps_ei *old,
+-                       const struct intel_rps_ei *now,
+-                       int threshold)
+-{
+-      u64 time, c0;
+-      unsigned int mul = 100;
+-
+-      if (old->cz_clock == 0)
+-              return false;
+-
+-      if (I915_READ(VLV_COUNTER_CONTROL) & VLV_COUNT_RANGE_HIGH)
+-              mul <<= 8;
+-
+-      time = now->cz_clock - old->cz_clock;
+-      time *= threshold * dev_priv->czclk_freq;
+-
+-      /* Workload can be split between render + media, e.g. SwapBuffers
+-       * being blitted in X after being rendered in mesa. To account for
+-       * this we need to combine both engines into our activity counter.
+-       */
+-      c0 = now->render_c0 - old->render_c0;
+-      c0 += now->media_c0 - old->media_c0;
+-      c0 *= mul * VLV_CZ_CLOCK_TO_MILLI_SEC;
+-
+-      return c0 >= time;
+-}
+-
+ void gen6_rps_reset_ei(struct drm_i915_private *dev_priv)
+ {
+-      vlv_c0_read(dev_priv, &dev_priv->rps.down_ei);
+-      dev_priv->rps.up_ei = dev_priv->rps.down_ei;
++      memset(&dev_priv->rps.ei, 0, sizeof(dev_priv->rps.ei));
+ }
+ 
+ static u32 vlv_wa_c0_ei(struct drm_i915_private *dev_priv, u32 pm_iir)
+ {
++      const struct intel_rps_ei *prev = &dev_priv->rps.ei;
+       struct intel_rps_ei now;
+       u32 events = 0;
+ 
+-      if ((pm_iir & (GEN6_PM_RP_DOWN_EI_EXPIRED | GEN6_PM_RP_UP_EI_EXPIRED)) == 0)
++      if ((pm_iir & GEN6_PM_RP_UP_EI_EXPIRED) == 0)
+               return 0;
+ 
+       vlv_c0_read(dev_priv, &now);
+       if (now.cz_clock == 0)
+               return 0;
+ 
+-      if (pm_iir & GEN6_PM_RP_DOWN_EI_EXPIRED) {
+-              if (!vlv_c0_above(dev_priv,
+-                                &dev_priv->rps.down_ei, &now,
+-                                dev_priv->rps.down_threshold))
+-                      events |= GEN6_PM_RP_DOWN_THRESHOLD;
+-              dev_priv->rps.down_ei = now;
+-      }
++      if (prev->cz_clock) {
++              u64 time, c0;
++              unsigned int mul;
++
++              mul = VLV_CZ_CLOCK_TO_MILLI_SEC * 100; /* scale to threshold% */
++              if (I915_READ(VLV_COUNTER_CONTROL) & VLV_COUNT_RANGE_HIGH)
++                      mul <<= 8;
+ 
+-      if (pm_iir & GEN6_PM_RP_UP_EI_EXPIRED) {
+-              if (vlv_c0_above(dev_priv,
+-                               &dev_priv->rps.up_ei, &now,
+-                               dev_priv->rps.up_threshold))
+-                      events |= GEN6_PM_RP_UP_THRESHOLD;
+-              dev_priv->rps.up_ei = now;
++              time = now.cz_clock - prev->cz_clock;
++              time *= dev_priv->czclk_freq;
++
++              /* Workload can be split between render + media,
++               * e.g. SwapBuffers being blitted in X after being rendered in
++               * mesa. To account for this we need to combine both engines
++               * into our activity counter.
++               */
++              c0 = now.render_c0 - prev->render_c0;
++              c0 += now.media_c0 - prev->media_c0;
++              c0 *= mul;
++
++              if (c0 > time * dev_priv->rps.up_threshold)
++                      events = GEN6_PM_RP_UP_THRESHOLD;
++              else if (c0 < time * dev_priv->rps.down_threshold)
++                      events = GEN6_PM_RP_DOWN_THRESHOLD;
+       }
+ 
++      dev_priv->rps.ei = now;
+       return events;
+ }
+ 
+@@ -4490,7 +4473,7 @@ void intel_irq_init(struct drm_i915_private *dev_priv)
+       /* Let's track the enabled rps events */
+       if (IS_VALLEYVIEW(dev_priv))
+               /* WaGsvRC0ResidencyMethod:vlv */
+-              dev_priv->pm_rps_events = GEN6_PM_RP_DOWN_EI_EXPIRED | GEN6_PM_RP_UP_EI_EXPIRED;
++              dev_priv->pm_rps_events = GEN6_PM_RP_UP_EI_EXPIRED;
+       else
+               dev_priv->pm_rps_events = GEN6_PM_RPS_EVENTS;
+ 
+@@ -4531,6 +4514,16 @@ void intel_irq_init(struct drm_i915_private *dev_priv)
+       if (!IS_GEN2(dev_priv))
+               dev->vblank_disable_immediate = true;
+ 
++      /* Most platforms treat the display irq block as an always-on
++       * power domain. vlv/chv can disable it at runtime and need
++       * special care to avoid writing any of the display block registers
++       * outside of the power domain. We defer setting up the display irqs
++       * in this case to the runtime pm.
++       */
++      dev_priv->display_irqs_enabled = true;
++      if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
++              dev_priv->display_irqs_enabled = false;
++
+       dev->driver->get_vblank_timestamp = i915_get_vblank_timestamp;
+       dev->driver->get_scanout_position = i915_get_crtc_scanoutpos;
+ 
+diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
+index b9be8a6141d8..5dc6082639db 100644
+--- a/drivers/gpu/drm/i915/intel_display.c
++++ b/drivers/gpu/drm/i915/intel_display.c
+@@ -3696,10 +3696,6 @@ static void intel_update_pipe_config(struct intel_crtc *crtc,
+       /* drm_atomic_helper_update_legacy_modeset_state might not be called. */
+       crtc->base.mode = crtc->base.state->mode;
+ 
+-      DRM_DEBUG_KMS("Updating pipe size %ix%i -> %ix%i\n",
+-                    old_crtc_state->pipe_src_w, old_crtc_state->pipe_src_h,
+-                    pipe_config->pipe_src_w, pipe_config->pipe_src_h);
+-
+       /*
+        * Update pipe size and adjust fitter if needed: the reason for this is
+        * that in compute_mode_changes we check the native mode (not the pfit
+@@ -4832,23 +4828,17 @@ static void skylake_pfit_enable(struct intel_crtc *crtc)
+       struct intel_crtc_scaler_state *scaler_state =
+               &crtc->config->scaler_state;
+ 
+-      DRM_DEBUG_KMS("for crtc_state = %p\n", crtc->config);
+-
+       if (crtc->config->pch_pfit.enabled) {
+               int id;
+ 
+-              if (WARN_ON(crtc->config->scaler_state.scaler_id < 0)) {
+-                      DRM_ERROR("Requesting pfit without getting a scaler first\n");
++              if (WARN_ON(crtc->config->scaler_state.scaler_id < 0))
+                       return;
+-              }
+ 
+               id = scaler_state->scaler_id;
+               I915_WRITE(SKL_PS_CTRL(pipe, id), PS_SCALER_EN |
+                       PS_FILTER_MEDIUM | scaler_state->scalers[id].mode);
+               I915_WRITE(SKL_PS_WIN_POS(pipe, id), crtc->config->pch_pfit.pos);
+               I915_WRITE(SKL_PS_WIN_SZ(pipe, id), crtc->config->pch_pfit.size);
+-
+-              DRM_DEBUG_KMS("for crtc_state = %p scaler_id = %d\n", crtc->config, id);
+       }
+ }
+ 
+diff --git a/drivers/gpu/drm/i915/intel_hotplug.c b/drivers/gpu/drm/i915/intel_hotplug.c
+index 334d47b5811a..db3afdf698ca 100644
+--- a/drivers/gpu/drm/i915/intel_hotplug.c
++++ b/drivers/gpu/drm/i915/intel_hotplug.c
+@@ -219,7 +219,7 @@ static void intel_hpd_irq_storm_reenable_work(struct work_struct *work)
+                       }
+               }
+       }
+-      if (dev_priv->display.hpd_irq_setup)
++      if (dev_priv->display_irqs_enabled && dev_priv->display.hpd_irq_setup)
+               dev_priv->display.hpd_irq_setup(dev_priv);
+       spin_unlock_irq(&dev_priv->irq_lock);
+ 
+@@ -425,7 +425,7 @@ void intel_hpd_irq_handler(struct drm_i915_private *dev_priv,
+               }
+       }
+ 
+-      if (storm_detected)
++      if (storm_detected && dev_priv->display_irqs_enabled)
+               dev_priv->display.hpd_irq_setup(dev_priv);
+       spin_unlock(&dev_priv->irq_lock);
+ 
+@@ -471,10 +471,12 @@ void intel_hpd_init(struct drm_i915_private *dev_priv)
+        * Interrupt setup is already guaranteed to be single-threaded, this is
+        * just to make the assert_spin_locked checks happy.
+        */
+-      spin_lock_irq(&dev_priv->irq_lock);
+-      if (dev_priv->display.hpd_irq_setup)
+-              dev_priv->display.hpd_irq_setup(dev_priv);
+-      spin_unlock_irq(&dev_priv->irq_lock);
++      if (dev_priv->display_irqs_enabled && dev_priv->display.hpd_irq_setup) {
++              spin_lock_irq(&dev_priv->irq_lock);
++              if (dev_priv->display_irqs_enabled)
++                      dev_priv->display.hpd_irq_setup(dev_priv);
++              spin_unlock_irq(&dev_priv->irq_lock);
++      }
+ }
+ 
+ static void i915_hpd_poll_init_work(struct work_struct *work)
+diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
+index 4147e51cf893..67db1577ee49 100644
+--- a/drivers/gpu/drm/i915/intel_lrc.c
++++ b/drivers/gpu/drm/i915/intel_lrc.c
+@@ -2152,42 +2152,30 @@ static int execlists_context_deferred_alloc(struct i915_gem_context *ctx,
+ 
+ void intel_lr_context_resume(struct drm_i915_private *dev_priv)
+ {
++      struct i915_gem_context *ctx = dev_priv->kernel_context;
+       struct intel_engine_cs *engine;
+-      struct i915_gem_context *ctx;
+-
+-      /* Because we emit WA_TAIL_DWORDS there may be a disparity
+-       * between our bookkeeping in ce->ring->head and ce->ring->tail and
+-       * that stored in context. As we only write new commands from
+-       * ce->ring->tail onwards, everything before that is junk. If the GPU
+-       * starts reading from its RING_HEAD from the context, it may try to
+-       * execute that junk and die.
+-       *
+-       * So to avoid that we reset the context images upon resume. For
+-       * simplicity, we just zero everything out.
+-       */
+-      list_for_each_entry(ctx, &dev_priv->context_list, link) {
+-              for_each_engine(engine, dev_priv) {
+-                      struct intel_context *ce = &ctx->engine[engine->id];
+-                      u32 *reg;
+ 
+-                      if (!ce->state)
+-                              continue;
++      for_each_engine(engine, dev_priv) {
++              struct intel_context *ce = &ctx->engine[engine->id];
++              void *vaddr;
++              uint32_t *reg_state;
+ 
+-                      reg = i915_gem_object_pin_map(ce->state->obj,
+-                                                    I915_MAP_WB);
+-                      if (WARN_ON(IS_ERR(reg)))
+-                              continue;
++              if (!ce->state)
++                      continue;
+ 
+-                      reg += LRC_STATE_PN * PAGE_SIZE / sizeof(*reg);
+-                      reg[CTX_RING_HEAD+1] = 0;
+-                      reg[CTX_RING_TAIL+1] = 0;
++              vaddr = i915_gem_object_pin_map(ce->state->obj, I915_MAP_WB);
++              if (WARN_ON(IS_ERR(vaddr)))
++                      continue;
+ 
+-                      ce->state->obj->dirty = true;
+-                      i915_gem_object_unpin_map(ce->state->obj);
++              reg_state = vaddr + LRC_STATE_PN * PAGE_SIZE;
+ 
+-                      ce->ring->head = ce->ring->tail = 0;
+-                      ce->ring->last_retired_head = -1;
+-                      intel_ring_update_space(ce->ring);
+-              }
++              reg_state[CTX_RING_HEAD+1] = 0;
++              reg_state[CTX_RING_TAIL+1] = 0;
++
++              ce->state->obj->dirty = true;
++              i915_gem_object_unpin_map(ce->state->obj);
++
++              ce->ring->head = 0;
++              ce->ring->tail = 0;
+       }
+ }
+diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
+index e559a45ff1f7..2c6d59d4b6d3 100644
+--- a/drivers/gpu/drm/i915/intel_pm.c
++++ b/drivers/gpu/drm/i915/intel_pm.c
+@@ -4903,6 +4903,12 @@ static void gen6_set_rps_thresholds(struct drm_i915_private *dev_priv, u8 val)
+               break;
+       }
+ 
++      /* When byt can survive without system hang with dynamic
++       * sw freq adjustments, this restriction can be lifted.
++       */
++      if (IS_VALLEYVIEW(dev_priv))
++              goto skip_hw_write;
++
+       I915_WRITE(GEN6_RP_UP_EI,
+                  GT_INTERVAL_FROM_US(dev_priv, ei_up));
+       I915_WRITE(GEN6_RP_UP_THRESHOLD,
+@@ -4923,6 +4929,7 @@ static void gen6_set_rps_thresholds(struct drm_i915_private *dev_priv, u8 val)
+                  GEN6_RP_UP_BUSY_AVG |
+                  GEN6_RP_DOWN_IDLE_AVG);
+ 
++skip_hw_write:
+       dev_priv->rps.power = new_power;
+       dev_priv->rps.up_threshold = threshold_up;
+       dev_priv->rps.down_threshold = threshold_down;
+@@ -4933,8 +4940,9 @@ static u32 gen6_rps_pm_mask(struct drm_i915_private *dev_priv, u8 val)
+ {
+       u32 mask = 0;
+ 
++      /* We use UP_EI_EXPIRED interupts for both up/down in manual mode */
+       if (val > dev_priv->rps.min_freq_softlimit)
+-              mask |= GEN6_PM_RP_DOWN_EI_EXPIRED | GEN6_PM_RP_DOWN_THRESHOLD | GEN6_PM_RP_DOWN_TIMEOUT;
++              mask |= GEN6_PM_RP_UP_EI_EXPIRED | GEN6_PM_RP_DOWN_THRESHOLD | GEN6_PM_RP_DOWN_TIMEOUT;
+       if (val < dev_priv->rps.max_freq_softlimit)
+               mask |= GEN6_PM_RP_UP_EI_EXPIRED | GEN6_PM_RP_UP_THRESHOLD;
+ 
+@@ -5034,7 +5042,7 @@ void gen6_rps_busy(struct drm_i915_private *dev_priv)
+ {
+       mutex_lock(&dev_priv->rps.hw_lock);
+       if (dev_priv->rps.enabled) {
+-              if (dev_priv->pm_rps_events & (GEN6_PM_RP_DOWN_EI_EXPIRED | GEN6_PM_RP_UP_EI_EXPIRED))
++              if (dev_priv->pm_rps_events & GEN6_PM_RP_UP_EI_EXPIRED)
+                       gen6_rps_reset_ei(dev_priv);
+               I915_WRITE(GEN6_PMINTRMSK,
+                          gen6_rps_pm_mask(dev_priv, dev_priv->rps.cur_freq));
+@@ -7960,10 +7968,10 @@ static bool skl_pcode_try_request(struct drm_i915_private *dev_priv, u32 mbox,
+  * @timeout_base_ms: timeout for polling with preemption enabled
+  *
+  * Keep resending the @request to @mbox until PCODE acknowledges it, PCODE
+- * reports an error or an overall timeout of @timeout_base_ms+10 ms expires.
++ * reports an error or an overall timeout of @timeout_base_ms+50 ms expires.
+  * The request is acknowledged once the PCODE reply dword equals @reply after
+  * applying @reply_mask. Polling is first attempted with preemption enabled
+- * for @timeout_base_ms and if this times out for another 10 ms with
++ * for @timeout_base_ms and if this times out for another 50 ms with
+  * preemption disabled.
+  *
+  * Returns 0 on success, %-ETIMEDOUT in case of a timeout, <0 in case of some
+@@ -7999,14 +8007,15 @@ int skl_pcode_request(struct drm_i915_private *dev_priv, u32 mbox, u32 request,
+        * worst case) _and_ PCODE was busy for some reason even after a
+        * (queued) request and @timeout_base_ms delay. As a workaround retry
+        * the poll with preemption disabled to maximize the number of
+-       * requests. Increase the timeout from @timeout_base_ms to 10ms to
++       * requests. Increase the timeout from @timeout_base_ms to 50ms to
+        * account for interrupts that could reduce the number of these
+-       * requests.
++       * requests, and for any quirks of the PCODE firmware that delays
++       * the request completion.
+        */
+       DRM_DEBUG_KMS("PCODE timeout, retrying with preemption disabled\n");
+       WARN_ON_ONCE(timeout_base_ms > 3);
+       preempt_disable();
+-      ret = wait_for_atomic(COND, 10);
++      ret = wait_for_atomic(COND, 50);
+       preempt_enable();
+ 
+ out:
+diff --git a/drivers/i2c/busses/i2c-bcm2835.c b/drivers/i2c/busses/i2c-bcm2835.c
+index d4f3239b5686..f283b714aa79 100644
+--- a/drivers/i2c/busses/i2c-bcm2835.c
++++ b/drivers/i2c/busses/i2c-bcm2835.c
+@@ -64,6 +64,7 @@ struct bcm2835_i2c_dev {
+       int irq;
+       struct i2c_adapter adapter;
+       struct completion completion;
++      struct i2c_msg *curr_msg;
+       u32 msg_err;
+       u8 *msg_buf;
+       size_t msg_buf_remaining;
+@@ -126,14 +127,13 @@ static irqreturn_t bcm2835_i2c_isr(int this_irq, void *data)
+               return IRQ_HANDLED;
+       }
+ 
+-      if (val & BCM2835_I2C_S_RXD) {
+-              bcm2835_drain_rxfifo(i2c_dev);
+-              if (!(val & BCM2835_I2C_S_DONE))
+-                      return IRQ_HANDLED;
+-      }
+-
+       if (val & BCM2835_I2C_S_DONE) {
+-              if (i2c_dev->msg_buf_remaining)
++              if (i2c_dev->curr_msg->flags & I2C_M_RD) {
++                      bcm2835_drain_rxfifo(i2c_dev);
++                      val = bcm2835_i2c_readl(i2c_dev, BCM2835_I2C_S);
++              }
++
++              if ((val & BCM2835_I2C_S_RXD) || i2c_dev->msg_buf_remaining)
+                       i2c_dev->msg_err = BCM2835_I2C_S_LEN;
+               else
+                       i2c_dev->msg_err = 0;
+@@ -141,11 +141,16 @@ static irqreturn_t bcm2835_i2c_isr(int this_irq, void *data)
+               return IRQ_HANDLED;
+       }
+ 
+-      if (val & BCM2835_I2C_S_TXD) {
++      if (val & BCM2835_I2C_S_TXW) {
+               bcm2835_fill_txfifo(i2c_dev);
+               return IRQ_HANDLED;
+       }
+ 
++      if (val & BCM2835_I2C_S_RXR) {
++              bcm2835_drain_rxfifo(i2c_dev);
++              return IRQ_HANDLED;
++      }
++
+       return IRQ_NONE;
+ }
+ 
+@@ -155,6 +160,7 @@ static int bcm2835_i2c_xfer_msg(struct bcm2835_i2c_dev *i2c_dev,
+       u32 c;
+       unsigned long time_left;
+ 
++      i2c_dev->curr_msg = msg;
+       i2c_dev->msg_buf = msg->buf;
+       i2c_dev->msg_buf_remaining = msg->len;
+       reinit_completion(&i2c_dev->completion);
+diff --git a/drivers/mtd/bcm47xxpart.c b/drivers/mtd/bcm47xxpart.c
+index 377947580203..283ff7e17a0f 100644
+--- a/drivers/mtd/bcm47xxpart.c
++++ b/drivers/mtd/bcm47xxpart.c
+@@ -229,12 +229,10 @@ static int bcm47xxpart_parse(struct mtd_info *master,
+ 
+                       last_trx_part = curr_part - 1;
+ 
+-                      /*
+-                       * We have whole TRX scanned, skip to the next part. Use
+-                       * roundown (not roundup), as the loop will increase
+-                       * offset in next step.
+-                       */
+-                      offset = rounddown(offset + trx->length, blocksize);
++                      /* Jump to the end of TRX */
++                      offset = roundup(offset + trx->length, blocksize);
++                      /* Next loop iteration will increase the offset */
++                      offset -= blocksize;
+                       continue;
+               }
+ 
+diff --git a/drivers/net/ethernet/mellanox/mlx4/cq.c b/drivers/net/ethernet/mellanox/mlx4/cq.c
+index a849da92f857..6b8635378f1f 100644
+--- a/drivers/net/ethernet/mellanox/mlx4/cq.c
++++ b/drivers/net/ethernet/mellanox/mlx4/cq.c
+@@ -101,13 +101,19 @@ void mlx4_cq_completion(struct mlx4_dev *dev, u32 cqn)
+ {
+       struct mlx4_cq *cq;
+ 
++      rcu_read_lock();
+       cq = radix_tree_lookup(&mlx4_priv(dev)->cq_table.tree,
+                              cqn & (dev->caps.num_cqs - 1));
++      rcu_read_unlock();
++
+       if (!cq) {
+               mlx4_dbg(dev, "Completion event for bogus CQ %08x\n", cqn);
+               return;
+       }
+ 
++      /* Acessing the CQ outside of rcu_read_lock is safe, because
++       * the CQ is freed only after interrupt handling is completed.
++       */
+       ++cq->arm_sn;
+ 
+       cq->comp(cq);
+@@ -118,23 +124,19 @@ void mlx4_cq_event(struct mlx4_dev *dev, u32 cqn, int event_type)
+       struct mlx4_cq_table *cq_table = &mlx4_priv(dev)->cq_table;
+       struct mlx4_cq *cq;
+ 
+-      spin_lock(&cq_table->lock);
+-
++      rcu_read_lock();
+       cq = radix_tree_lookup(&cq_table->tree, cqn & (dev->caps.num_cqs - 1));
+-      if (cq)
+-              atomic_inc(&cq->refcount);
+-
+-      spin_unlock(&cq_table->lock);
++      rcu_read_unlock();
+ 
+       if (!cq) {
+-              mlx4_warn(dev, "Async event for bogus CQ %08x\n", cqn);
++              mlx4_dbg(dev, "Async event for bogus CQ %08x\n", cqn);
+               return;
+       }
+ 
++      /* Acessing the CQ outside of rcu_read_lock is safe, because
++       * the CQ is freed only after interrupt handling is completed.
++       */
+       cq->event(cq, event_type);
+-
+-      if (atomic_dec_and_test(&cq->refcount))
+-              complete(&cq->free);
+ }
+ 
+ static int mlx4_SW2HW_CQ(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox,
+@@ -301,9 +303,9 @@ int mlx4_cq_alloc(struct mlx4_dev *dev, int nent,
+       if (err)
+               return err;
+ 
+-      spin_lock_irq(&cq_table->lock);
++      spin_lock(&cq_table->lock);
+       err = radix_tree_insert(&cq_table->tree, cq->cqn, cq);
+-      spin_unlock_irq(&cq_table->lock);
++      spin_unlock(&cq_table->lock);
+       if (err)
+               goto err_icm;
+ 
+@@ -349,9 +351,9 @@ int mlx4_cq_alloc(struct mlx4_dev *dev, int nent,
+       return 0;
+ 
+ err_radix:
+-      spin_lock_irq(&cq_table->lock);
++      spin_lock(&cq_table->lock);
+       radix_tree_delete(&cq_table->tree, cq->cqn);
+-      spin_unlock_irq(&cq_table->lock);
++      spin_unlock(&cq_table->lock);
+ 
+ err_icm:
+       mlx4_cq_free_icm(dev, cq->cqn);
+@@ -370,15 +372,15 @@ void mlx4_cq_free(struct mlx4_dev *dev, struct mlx4_cq *cq)
+       if (err)
+              mlx4_warn(dev, "HW2SW_CQ failed (%d) for CQN %06x\n", err, cq->cqn);
+ 
++      spin_lock(&cq_table->lock);
++      radix_tree_delete(&cq_table->tree, cq->cqn);
++      spin_unlock(&cq_table->lock);
++
+      synchronize_irq(priv->eq_table.eq[MLX4_CQ_TO_EQ_VECTOR(cq->vector)].irq);
+       if (priv->eq_table.eq[MLX4_CQ_TO_EQ_VECTOR(cq->vector)].irq !=
+           priv->eq_table.eq[MLX4_EQ_ASYNC].irq)
+               synchronize_irq(priv->eq_table.eq[MLX4_EQ_ASYNC].irq);
+ 
+-      spin_lock_irq(&cq_table->lock);
+-      radix_tree_delete(&cq_table->tree, cq->cqn);
+-      spin_unlock_irq(&cq_table->lock);
+-
+       if (atomic_dec_and_test(&cq->refcount))
+               complete(&cq->free);
+       wait_for_completion(&cq->free);
+diff --git a/drivers/net/ethernet/mellanox/mlx4/en_rx.c b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
+index 4d3ddc2f7e43..5d484581becd 100644
+--- a/drivers/net/ethernet/mellanox/mlx4/en_rx.c
++++ b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
+@@ -444,8 +444,14 @@ int mlx4_en_activate_rx_rings(struct mlx4_en_priv *priv)
+               ring->cqn = priv->rx_cq[ring_ind]->mcq.cqn;
+ 
+               ring->stride = stride;
+-              if (ring->stride <= TXBB_SIZE)
++              if (ring->stride <= TXBB_SIZE) {
++                      /* Stamp first unused send wqe */
++                      __be32 *ptr = (__be32 *)ring->buf;
++                      __be32 stamp = cpu_to_be32(1 << STAMP_SHIFT);
++                      *ptr = stamp;
++                      /* Move pointer to start of rx section */
+                       ring->buf += TXBB_SIZE;
++              }
+ 
+               ring->log_stride = ffs(ring->stride) - 1;
+               ring->buf_size = ring->size * ring->stride;
+diff --git a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
+index c548beaaf910..32f76bf018c3 100644
+--- a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
++++ b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
+@@ -2980,6 +2980,9 @@ int mlx4_RST2INIT_QP_wrapper(struct mlx4_dev *dev, int slave,
+               put_res(dev, slave, srqn, RES_SRQ);
+               qp->srq = srq;
+       }
++
++      /* Save param3 for dynamic changes from VST back to VGT */
++      qp->param3 = qpc->param3;
+       put_res(dev, slave, rcqn, RES_CQ);
+       put_res(dev, slave, mtt_base, RES_MTT);
+       res_end_move(dev, slave, RES_QP, qpn);
+@@ -3772,7 +3775,6 @@ int mlx4_INIT2RTR_QP_wrapper(struct mlx4_dev *dev, int slave,
+       int qpn = vhcr->in_modifier & 0x7fffff;
+       struct res_qp *qp;
+       u8 orig_sched_queue;
+-      __be32  orig_param3 = qpc->param3;
+       u8 orig_vlan_control = qpc->pri_path.vlan_control;
+       u8 orig_fvl_rx = qpc->pri_path.fvl_rx;
+       u8 orig_pri_path_fl = qpc->pri_path.fl;
+@@ -3814,7 +3816,6 @@ int mlx4_INIT2RTR_QP_wrapper(struct mlx4_dev *dev, int slave,
+        */
+       if (!err) {
+               qp->sched_queue = orig_sched_queue;
+-              qp->param3      = orig_param3;
+               qp->vlan_control = orig_vlan_control;
+               qp->fvl_rx      =  orig_fvl_rx;
+               qp->pri_path_fl = orig_pri_path_fl;
+diff --git a/drivers/net/wireless/ralink/rt2x00/rt2x00dev.c b/drivers/net/wireless/ralink/rt2x00/rt2x00dev.c
+index 4e0c5653054b..b7273be9303d 100644
+--- a/drivers/net/wireless/ralink/rt2x00/rt2x00dev.c
++++ b/drivers/net/wireless/ralink/rt2x00/rt2x00dev.c
+@@ -1422,7 +1422,7 @@ void rt2x00lib_remove_dev(struct rt2x00_dev *rt2x00dev)
+       cancel_work_sync(&rt2x00dev->intf_work);
+       cancel_delayed_work_sync(&rt2x00dev->autowakeup_work);
+       cancel_work_sync(&rt2x00dev->sleep_work);
+-#ifdef CONFIG_RT2X00_LIB_USB
++#if IS_ENABLED(CONFIG_RT2X00_LIB_USB)
+       if (rt2x00_is_usb(rt2x00dev)) {
+               usb_kill_anchored_urbs(rt2x00dev->anchor);
+               hrtimer_cancel(&rt2x00dev->txstatus_timer);
+diff --git a/drivers/net/wireless/ralink/rt2x00/rt2x00usb.c b/drivers/net/wireless/ralink/rt2x00/rt2x00usb.c
+index 6005e14213ca..662705e31136 100644
+--- a/drivers/net/wireless/ralink/rt2x00/rt2x00usb.c
++++ b/drivers/net/wireless/ralink/rt2x00/rt2x00usb.c
+@@ -319,10 +319,8 @@ static bool rt2x00usb_kick_tx_entry(struct queue_entry *entry, void *data)
+                         entry->skb->data, length,
+                         rt2x00usb_interrupt_txdone, entry);
+ 
+-      usb_anchor_urb(entry_priv->urb, rt2x00dev->anchor);
+       status = usb_submit_urb(entry_priv->urb, GFP_ATOMIC);
+       if (status) {
+-              usb_unanchor_urb(entry_priv->urb);
+               if (status == -ENODEV)
+                       clear_bit(DEVICE_STATE_PRESENT, &rt2x00dev->flags);
+               set_bit(ENTRY_DATA_IO_FAILED, &entry->flags);
+@@ -410,10 +408,8 @@ static bool rt2x00usb_kick_rx_entry(struct queue_entry *entry, void *data)
+                         entry->skb->data, entry->skb->len,
+                         rt2x00usb_interrupt_rxdone, entry);
+ 
+-      usb_anchor_urb(entry_priv->urb, rt2x00dev->anchor);
+       status = usb_submit_urb(entry_priv->urb, GFP_ATOMIC);
+       if (status) {
+-              usb_unanchor_urb(entry_priv->urb);
+               if (status == -ENODEV)
+                       clear_bit(DEVICE_STATE_PRESENT, &rt2x00dev->flags);
+               set_bit(ENTRY_DATA_IO_FAILED, &entry->flags);
+@@ -824,10 +820,6 @@ int rt2x00usb_probe(struct usb_interface *usb_intf,
+       if (retval)
+               goto exit_free_device;
+ 
+-      retval = rt2x00lib_probe_dev(rt2x00dev);
+-      if (retval)
+-              goto exit_free_reg;
+-
+       rt2x00dev->anchor = devm_kmalloc(&usb_dev->dev,
+                                       sizeof(struct usb_anchor),
+                                       GFP_KERNEL);
+@@ -835,10 +827,17 @@ int rt2x00usb_probe(struct usb_interface *usb_intf,
+               retval = -ENOMEM;
+               goto exit_free_reg;
+       }
+-
+       init_usb_anchor(rt2x00dev->anchor);
++
++      retval = rt2x00lib_probe_dev(rt2x00dev);
++      if (retval)
++              goto exit_free_anchor;
++
+       return 0;
+ 
++exit_free_anchor:
++      usb_kill_anchored_urbs(rt2x00dev->anchor);
++
+ exit_free_reg:
+       rt2x00usb_free_reg(rt2x00dev);
+ 
+diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
+index c28ccf1b5a1f..35fb2bef0e45 100644
+--- a/drivers/usb/core/hub.c
++++ b/drivers/usb/core/hub.c
+@@ -2650,8 +2650,15 @@ static int hub_port_wait_reset(struct usb_hub *hub, int port1,
+               if (ret < 0)
+                       return ret;
+ 
+-              /* The port state is unknown until the reset completes. */
+-              if (!(portstatus & USB_PORT_STAT_RESET))
++              /*
++               * The port state is unknown until the reset completes.
++               *
++               * On top of that, some chips may require additional time
++               * to re-establish a connection after the reset is complete,
++               * so also wait for the connection to be re-established.
++               */
++              if (!(portstatus & USB_PORT_STAT_RESET) &&
++                  (portstatus & USB_PORT_STAT_CONNECTION))
+                       break;
+ 
+               /* switch to the long delay after two short delay failures */
+diff --git a/fs/orangefs/devorangefs-req.c b/fs/orangefs/devorangefs-req.c
+index 516ffb4dc9a0..f419dd999581 100644
+--- a/fs/orangefs/devorangefs-req.c
++++ b/fs/orangefs/devorangefs-req.c
+@@ -402,8 +402,9 @@ static ssize_t orangefs_devreq_write_iter(struct kiocb *iocb,
+       /* remove the op from the in progress hash table */
+       op = orangefs_devreq_remove_op(head.tag);
+       if (!op) {
+-              gossip_err("WARNING: No one's waiting for tag %llu\n",
+-                         llu(head.tag));
++              gossip_debug(GOSSIP_DEV_DEBUG,
++                           "%s: No one's waiting for tag %llu\n",
++                           __func__, llu(head.tag));
+               return ret;
+       }
+ 
+diff --git a/fs/orangefs/orangefs-debugfs.c b/fs/orangefs/orangefs-debugfs.c
+index 38887cc5577f..0748a26598fc 100644
+--- a/fs/orangefs/orangefs-debugfs.c
++++ b/fs/orangefs/orangefs-debugfs.c
+@@ -671,8 +671,10 @@ int orangefs_prepare_debugfs_help_string(int at_boot)
+                */
+               cdm_element_count =
+                       orangefs_prepare_cdm_array(client_debug_array_string);
+-              if (cdm_element_count <= 0)
++              if (cdm_element_count <= 0) {
++                      kfree(new);
+                       goto out;
++              }
+ 
+               for (i = 0; i < cdm_element_count; i++) {
+                       strlcat(new, "\t", string_size);
+@@ -963,13 +965,13 @@ int orangefs_debugfs_new_client_string(void __user *arg)
+       int ret;
+ 
+       ret = copy_from_user(&client_debug_array_string,
+-                                     (void __user *)arg,
+-                                     ORANGEFS_MAX_DEBUG_STRING_LEN);
++                           (void __user *)arg,
++                           ORANGEFS_MAX_DEBUG_STRING_LEN);
+ 
+       if (ret != 0) {
+               pr_info("%s: CLIENT_STRING: copy_from_user failed\n",
+                       __func__);
+-              return -EIO;
++              return -EFAULT;
+       }
+ 
+       /*
+@@ -984,17 +986,18 @@ int orangefs_debugfs_new_client_string(void __user *arg)
+        */
+       client_debug_array_string[ORANGEFS_MAX_DEBUG_STRING_LEN - 1] =
+               '\0';
+-      
++
+       pr_info("%s: client debug array string has been received.\n",
+               __func__);
+ 
+       if (!help_string_initialized) {
+ 
+               /* Build a proper debug help string. */
+-              if (orangefs_prepare_debugfs_help_string(0)) {
++              ret = orangefs_prepare_debugfs_help_string(0);
++              if (ret) {
+                       gossip_err("%s: no debug help string \n",
+                                  __func__);
+-                      return -EIO;
++                      return ret;
+               }
+ 
+       }
+@@ -1007,7 +1010,7 @@ int orangefs_debugfs_new_client_string(void __user *arg)
+ 
+       help_string_initialized++;
+ 
+-      return ret;
++      return 0;
+ }
+ 
+ int orangefs_debugfs_new_debug(void __user *arg) 
+diff --git a/fs/orangefs/orangefs-dev-proto.h b/fs/orangefs/orangefs-dev-proto.h
+index a3d84ffee905..f380f9ed1b28 100644
+--- a/fs/orangefs/orangefs-dev-proto.h
++++ b/fs/orangefs/orangefs-dev-proto.h
+@@ -50,8 +50,7 @@
+  * Misc constants. Please retain them as multiples of 8!
+  * Otherwise 32-64 bit interactions will be messed up :)
+  */
+-#define ORANGEFS_MAX_DEBUG_STRING_LEN 0x00000400
+-#define ORANGEFS_MAX_DEBUG_ARRAY_LEN  0x00000800
++#define ORANGEFS_MAX_DEBUG_STRING_LEN 0x00000800
+ 
+ /*
+  * The maximum number of directory entries in a single request is 96.
+diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
+index f2b04a77258d..8ab0974f4ee2 100644
+--- a/net/packet/af_packet.c
++++ b/net/packet/af_packet.c
+@@ -4235,8 +4235,8 @@ static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u,
+               if (unlikely(!PAGE_ALIGNED(req->tp_block_size)))
+                       goto out;
+               if (po->tp_version >= TPACKET_V3 &&
+-                  (int)(req->tp_block_size -
+-                        BLK_PLUS_PRIV(req_u->req3.tp_sizeof_priv)) <= 0)
++                  req->tp_block_size <=
++                        BLK_PLUS_PRIV((u64)req_u->req3.tp_sizeof_priv))
+                       goto out;
+               if (unlikely(req->tp_frame_size < po->tp_hdrlen +
+                                       po->tp_reserve))
