commit:     255753c89d48f9d5e6428111ab1439c1ed6d1985
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Sun Jan 13 19:29:52 2019 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Sun Jan 13 19:29:52 2019 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=255753c8

proj/linux-patches: Linux patch 4.20.2

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

 0000_README             |    4 +
 1001_linux-4.20.2.patch | 3086 +++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 3090 insertions(+)

diff --git a/0000_README b/0000_README
index 543d775..0e0dc28 100644
--- a/0000_README
+++ b/0000_README
@@ -47,6 +47,10 @@ Patch:  1000_linux-4.20.1.patch
 From:   http://www.kernel.org
 Desc:   Linux 4.20.1
 
+Patch:  1001_linux-4.20.2.patch
+From:   http://www.kernel.org
+Desc:   Linux 4.20.2
+
 Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.

diff --git a/1001_linux-4.20.2.patch b/1001_linux-4.20.2.patch
new file mode 100644
index 0000000..212bec9
--- /dev/null
+++ b/1001_linux-4.20.2.patch
@@ -0,0 +1,3086 @@
+diff --git a/Makefile b/Makefile
+index 84d2f8deea30..4ba3dd0bf35d 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 4
+ PATCHLEVEL = 20
+-SUBLEVEL = 1
++SUBLEVEL = 2
+ EXTRAVERSION =
+ NAME = Shy Crocodile
+ 
+diff --git a/arch/powerpc/kernel/signal_32.c b/arch/powerpc/kernel/signal_32.c
+index 6327fd79b0fb..fd59fef9931b 100644
+--- a/arch/powerpc/kernel/signal_32.c
++++ b/arch/powerpc/kernel/signal_32.c
+@@ -848,7 +848,23 @@ static long restore_tm_user_regs(struct pt_regs *regs,
+       /* If TM bits are set to the reserved value, it's an invalid context */
+       if (MSR_TM_RESV(msr_hi))
+               return 1;
+-      /* Pull in the MSR TM bits from the user context */
++
++      /*
++       * Disabling preemption, since it is unsafe to be preempted
++       * with MSR[TS] set without recheckpointing.
++       */
++      preempt_disable();
++
++      /*
++       * CAUTION:
++       * After regs->MSR[TS] being updated, make sure that get_user(),
++       * put_user() or similar functions are *not* called. These
++       * functions can generate page faults which will cause the process
++       * to be de-scheduled with MSR[TS] set but without calling
++       * tm_recheckpoint(). This can cause a bug.
++       *
++       * Pull in the MSR TM bits from the user context
++       */
+       regs->msr = (regs->msr & ~MSR_TS_MASK) | (msr_hi & MSR_TS_MASK);
+       /* Now, recheckpoint.  This loads up all of the checkpointed (older)
+        * registers, including FP and V[S]Rs.  After recheckpointing, the
+@@ -873,6 +889,8 @@ static long restore_tm_user_regs(struct pt_regs *regs,
+       }
+ #endif
+ 
++      preempt_enable();
++
+       return 0;
+ }
+ #endif
+@@ -1140,11 +1158,11 @@ SYSCALL_DEFINE0(rt_sigreturn)
+ {
+       struct rt_sigframe __user *rt_sf;
+       struct pt_regs *regs = current_pt_regs();
+-      int tm_restore = 0;
+ #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
+       struct ucontext __user *uc_transact;
+       unsigned long msr_hi;
+       unsigned long tmp;
++      int tm_restore = 0;
+ #endif
+       /* Always make any pending restarted system calls return -EINTR */
+       current->restart_block.fn = do_no_restart_syscall;
+@@ -1192,19 +1210,11 @@ SYSCALL_DEFINE0(rt_sigreturn)
+                               goto bad;
+               }
+       }
+-      if (!tm_restore) {
+-              /*
+-               * Unset regs->msr because ucontext MSR TS is not
+-               * set, and recheckpoint was not called. This avoid
+-               * hitting a TM Bad thing at RFID
+-               */
+-              regs->msr &= ~MSR_TS_MASK;
+-      }
+-      /* Fall through, for non-TM restore */
+-#endif
+       if (!tm_restore)
+-              if (do_setcontext(&rt_sf->uc, regs, 1))
+-                      goto bad;
++              /* Fall through, for non-TM restore */
++#endif
++      if (do_setcontext(&rt_sf->uc, regs, 1))
++              goto bad;
+ 
+       /*
+        * It's not clear whether or why it is desirable to save the
+diff --git a/arch/powerpc/kernel/signal_64.c b/arch/powerpc/kernel/signal_64.c
+index daa28cb72272..bbd1c73243d7 100644
+--- a/arch/powerpc/kernel/signal_64.c
++++ b/arch/powerpc/kernel/signal_64.c
+@@ -467,20 +467,6 @@ static long restore_tm_sigcontexts(struct task_struct *tsk,
+       if (MSR_TM_RESV(msr))
+               return -EINVAL;
+ 
+-      /* pull in MSR TS bits from user context */
+-      regs->msr = (regs->msr & ~MSR_TS_MASK) | (msr & MSR_TS_MASK);
+-
+-      /*
+-       * Ensure that TM is enabled in regs->msr before we leave the signal
+-       * handler. It could be the case that (a) user disabled the TM bit
+-       * through the manipulation of the MSR bits in uc_mcontext or (b) the
+-       * TM bit was disabled because a sufficient number of context switches
+-       * happened whilst in the signal handler and load_tm overflowed,
+-       * disabling the TM bit. In either case we can end up with an illegal
+-       * TM state leading to a TM Bad Thing when we return to userspace.
+-       */
+-      regs->msr |= MSR_TM;
+-
+       /* pull in MSR LE from user context */
+       regs->msr = (regs->msr & ~MSR_LE) | (msr & MSR_LE);
+ 
+@@ -572,6 +558,34 @@ static long restore_tm_sigcontexts(struct task_struct *tsk,
+       tm_enable();
+       /* Make sure the transaction is marked as failed */
+       tsk->thread.tm_texasr |= TEXASR_FS;
++
++      /*
++       * Disabling preemption, since it is unsafe to be preempted
++       * with MSR[TS] set without recheckpointing.
++       */
++      preempt_disable();
++
++      /* pull in MSR TS bits from user context */
++      regs->msr = (regs->msr & ~MSR_TS_MASK) | (msr & MSR_TS_MASK);
++
++      /*
++       * Ensure that TM is enabled in regs->msr before we leave the signal
++       * handler. It could be the case that (a) user disabled the TM bit
++       * through the manipulation of the MSR bits in uc_mcontext or (b) the
++       * TM bit was disabled because a sufficient number of context switches
++       * happened whilst in the signal handler and load_tm overflowed,
++       * disabling the TM bit. In either case we can end up with an illegal
++       * TM state leading to a TM Bad Thing when we return to userspace.
++       *
++       * CAUTION:
++       * After regs->MSR[TS] being updated, make sure that get_user(),
++       * put_user() or similar functions are *not* called. These
++       * functions can generate page faults which will cause the process
++       * to be de-scheduled with MSR[TS] set but without calling
++       * tm_recheckpoint(). This can cause a bug.
++       */
++      regs->msr |= MSR_TM;
++
+       /* This loads the checkpointed FP/VEC state, if used */
+       tm_recheckpoint(&tsk->thread);
+ 
+@@ -585,6 +599,8 @@ static long restore_tm_sigcontexts(struct task_struct *tsk,
+               regs->msr |= MSR_VEC;
+       }
+ 
++      preempt_enable();
++
+       return err;
+ }
+ #endif
+@@ -740,23 +756,11 @@ SYSCALL_DEFINE0(rt_sigreturn)
+                                          &uc_transact->uc_mcontext))
+                       goto badframe;
+       }
+-#endif
++      else
+       /* Fall through, for non-TM restore */
+-      if (!MSR_TM_ACTIVE(msr)) {
+-              /*
+-               * Unset MSR[TS] on the thread regs since MSR from user
+-               * context does not have MSR active, and recheckpoint was
+-               * not called since restore_tm_sigcontexts() was not called
+-               * also.
+-               *
+-               * If not unsetting it, the code can RFID to userspace with
+-               * MSR[TS] set, but without CPU in the proper state,
+-               * causing a TM bad thing.
+-               */
+-              current->thread.regs->msr &= ~MSR_TS_MASK;
+-              if (restore_sigcontext(current, NULL, 1, &uc->uc_mcontext))
+-                      goto badframe;
+-      }
++#endif
++      if (restore_sigcontext(current, NULL, 1, &uc->uc_mcontext))
++              goto badframe;
+ 
+       if (restore_altstack(&uc->uc_stack))
+               goto badframe;
+diff --git a/arch/powerpc/platforms/4xx/ocm.c b/arch/powerpc/platforms/4xx/ocm.c
+index f5bbd4563342..3632de52db0a 100644
+--- a/arch/powerpc/platforms/4xx/ocm.c
++++ b/arch/powerpc/platforms/4xx/ocm.c
+@@ -179,7 +179,7 @@ static void __init ocm_init_node(int count, struct device_node *node)
+       /* ioremap the non-cached region */
+       if (ocm->nc.memtotal) {
+               ocm->nc.virt = __ioremap(ocm->nc.phys, ocm->nc.memtotal,
+-                                       _PAGE_EXEC | PAGE_KERNEL_NCG);
++                      _PAGE_EXEC | pgprot_val(PAGE_KERNEL_NCG));
+ 
+               if (!ocm->nc.virt) {
+                       printk(KERN_ERR
+@@ -194,7 +194,7 @@ static void __init ocm_init_node(int count, struct device_node *node)
+ 
+       if (ocm->c.memtotal) {
+               ocm->c.virt = __ioremap(ocm->c.phys, ocm->c.memtotal,
+-                                      _PAGE_EXEC | PAGE_KERNEL);
++                                      _PAGE_EXEC | pgprot_val(PAGE_KERNEL));
+ 
+               if (!ocm->c.virt) {
+                       printk(KERN_ERR
+diff --git a/block/blk-mq-sched.c b/block/blk-mq-sched.c
+index 29bfe8017a2d..da1de190a3b1 100644
+--- a/block/blk-mq-sched.c
++++ b/block/blk-mq-sched.c
+@@ -54,13 +54,14 @@ void blk_mq_sched_assign_ioc(struct request *rq, struct bio *bio)
+  * Mark a hardware queue as needing a restart. For shared queues, maintain
+  * a count of how many hardware queues are marked for restart.
+  */
+-static void blk_mq_sched_mark_restart_hctx(struct blk_mq_hw_ctx *hctx)
++void blk_mq_sched_mark_restart_hctx(struct blk_mq_hw_ctx *hctx)
+ {
+       if (test_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state))
+               return;
+ 
+       set_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state);
+ }
++EXPORT_SYMBOL_GPL(blk_mq_sched_mark_restart_hctx);
+ 
+ void blk_mq_sched_restart(struct blk_mq_hw_ctx *hctx)
+ {
+diff --git a/block/blk-mq-sched.h b/block/blk-mq-sched.h
+index 8a9544203173..38e06e23821f 100644
+--- a/block/blk-mq-sched.h
++++ b/block/blk-mq-sched.h
+@@ -15,6 +15,7 @@ bool blk_mq_sched_try_merge(struct request_queue *q, struct bio *bio,
+                               struct request **merged_request);
+ bool __blk_mq_sched_bio_merge(struct request_queue *q, struct bio *bio);
+bool blk_mq_sched_try_insert_merge(struct request_queue *q, struct request *rq);
++void blk_mq_sched_mark_restart_hctx(struct blk_mq_hw_ctx *hctx);
+ void blk_mq_sched_restart(struct blk_mq_hw_ctx *hctx);
+ 
+ void blk_mq_sched_insert_request(struct request *rq, bool at_head,
+diff --git a/block/blk-stat.h b/block/blk-stat.h
+index f4a1568e81a4..17b47a86eefb 100644
+--- a/block/blk-stat.h
++++ b/block/blk-stat.h
+@@ -145,6 +145,11 @@ static inline void blk_stat_activate_nsecs(struct blk_stat_callback *cb,
+       mod_timer(&cb->timer, jiffies + nsecs_to_jiffies(nsecs));
+ }
+ 
++static inline void blk_stat_deactivate(struct blk_stat_callback *cb)
++{
++      del_timer_sync(&cb->timer);
++}
++
+ /**
+  * blk_stat_activate_msecs() - Gather block statistics during a time window in
+  * milliseconds.
+diff --git a/block/blk-wbt.c b/block/blk-wbt.c
+index 8ac93fcbaa2e..0c62bf4eca75 100644
+--- a/block/blk-wbt.c
++++ b/block/blk-wbt.c
+@@ -760,8 +760,10 @@ void wbt_disable_default(struct request_queue *q)
+       if (!rqos)
+               return;
+       rwb = RQWB(rqos);
+-      if (rwb->enable_state == WBT_STATE_ON_DEFAULT)
++      if (rwb->enable_state == WBT_STATE_ON_DEFAULT) {
++              blk_stat_deactivate(rwb->cb);
+               rwb->wb_normal = 0;
++      }
+ }
+ EXPORT_SYMBOL_GPL(wbt_disable_default);
+ 
+diff --git a/block/mq-deadline.c b/block/mq-deadline.c
+index 099a9e05854c..d5e21ce44d2c 100644
+--- a/block/mq-deadline.c
++++ b/block/mq-deadline.c
+@@ -373,9 +373,16 @@ done:
+ 
+ /*
+  * One confusing aspect here is that we get called for a specific
+- * hardware queue, but we return a request that may not be for a
++ * hardware queue, but we may return a request that is for a
+  * different hardware queue. This is because mq-deadline has shared
+  * state for all hardware queues, in terms of sorting, FIFOs, etc.
++ *
++ * For a zoned block device, __dd_dispatch_request() may return NULL
++ * if all the queued write requests are directed at zones that are already
++ * locked due to on-going write requests. In this case, make sure to mark
++ * the queue as needing a restart to ensure that the queue is run again
++ * and the pending writes dispatched once the target zones for the ongoing
++ * write requests are unlocked in dd_finish_request().
+  */
+ static struct request *dd_dispatch_request(struct blk_mq_hw_ctx *hctx)
+ {
+@@ -384,6 +391,9 @@ static struct request *dd_dispatch_request(struct blk_mq_hw_ctx *hctx)
+ 
+       spin_lock(&dd->lock);
+       rq = __dd_dispatch_request(dd);
++      if (!rq && blk_queue_is_zoned(hctx->queue) &&
++          !list_empty(&dd->fifo_list[WRITE]))
++              blk_mq_sched_mark_restart_hctx(hctx);
+       spin_unlock(&dd->lock);
+ 
+       return rq;
+diff --git a/drivers/base/dd.c b/drivers/base/dd.c
+index 169412ee4ae8..dbba123e058d 100644
+--- a/drivers/base/dd.c
++++ b/drivers/base/dd.c
+@@ -933,11 +933,11 @@ static void __device_release_driver(struct device *dev, struct device *parent)
+ 
+               while (device_links_busy(dev)) {
+                       device_unlock(dev);
+-                      if (parent)
++                      if (parent && dev->bus->need_parent_lock)
+                               device_unlock(parent);
+ 
+                       device_links_unbind_consumers(dev);
+-                      if (parent)
++                      if (parent && dev->bus->need_parent_lock)
+                               device_lock(parent);
+ 
+                       device_lock(dev);
+diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c
+index 4879595200e1..8e6a0db6555f 100644
+--- a/drivers/block/zram/zram_drv.c
++++ b/drivers/block/zram/zram_drv.c
+@@ -382,8 +382,10 @@ static ssize_t backing_dev_store(struct device *dev,
+ 
+       bdev = bdgrab(I_BDEV(inode));
+       err = blkdev_get(bdev, FMODE_READ | FMODE_WRITE | FMODE_EXCL, zram);
+-      if (err < 0)
++      if (err < 0) {
++              bdev = NULL;
+               goto out;
++      }
+ 
+       nr_pages = i_size_read(inode) >> PAGE_SHIFT;
+       bitmap_sz = BITS_TO_LONGS(nr_pages) * sizeof(long);
+diff --git a/drivers/dax/pmem.c b/drivers/dax/pmem.c
+index 99e2aace8078..2c1f459c0c63 100644
+--- a/drivers/dax/pmem.c
++++ b/drivers/dax/pmem.c
+@@ -48,9 +48,8 @@ static void dax_pmem_percpu_exit(void *data)
+       percpu_ref_exit(ref);
+ }
+ 
+-static void dax_pmem_percpu_kill(void *data)
++static void dax_pmem_percpu_kill(struct percpu_ref *ref)
+ {
+-      struct percpu_ref *ref = data;
+       struct dax_pmem *dax_pmem = to_dax_pmem(ref);
+ 
+       dev_dbg(dax_pmem->dev, "trace\n");
+@@ -112,17 +111,10 @@ static int dax_pmem_probe(struct device *dev)
+       }
+ 
+       dax_pmem->pgmap.ref = &dax_pmem->ref;
++      dax_pmem->pgmap.kill = dax_pmem_percpu_kill;
+       addr = devm_memremap_pages(dev, &dax_pmem->pgmap);
+-      if (IS_ERR(addr)) {
+-              devm_remove_action(dev, dax_pmem_percpu_exit, &dax_pmem->ref);
+-              percpu_ref_exit(&dax_pmem->ref);
++      if (IS_ERR(addr))
+               return PTR_ERR(addr);
+-      }
+-
+-      rc = devm_add_action_or_reset(dev, dax_pmem_percpu_kill,
+-                                                      &dax_pmem->ref);
+-      if (rc)
+-              return rc;
+ 
+       /* adjust the dax_region resource to the start of data */
+       memcpy(&res, &dax_pmem->pgmap.res, sizeof(res));
+diff --git a/drivers/gpu/drm/nouveau/dispnv50/disp.c b/drivers/gpu/drm/nouveau/dispnv50/disp.c
+index db1bf7f88c1f..e0e6d66de745 100644
+--- a/drivers/gpu/drm/nouveau/dispnv50/disp.c
++++ b/drivers/gpu/drm/nouveau/dispnv50/disp.c
+@@ -1262,8 +1262,16 @@ nv50_mstm_fini(struct nv50_mstm *mstm)
+ static void
+ nv50_mstm_init(struct nv50_mstm *mstm)
+ {
+-      if (mstm && mstm->mgr.mst_state)
+-              drm_dp_mst_topology_mgr_resume(&mstm->mgr);
++      int ret;
++
++      if (!mstm || !mstm->mgr.mst_state)
++              return;
++
++      ret = drm_dp_mst_topology_mgr_resume(&mstm->mgr);
++      if (ret == -1) {
++              drm_dp_mst_topology_mgr_set_mst(&mstm->mgr, false);
++              drm_kms_helper_hotplug_event(mstm->mgr.dev);
++      }
+ }
+ 
+ static void
+diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_psr.c b/drivers/gpu/drm/rockchip/rockchip_drm_psr.c
+index 79d00d861a31..01ff3c858875 100644
+--- a/drivers/gpu/drm/rockchip/rockchip_drm_psr.c
++++ b/drivers/gpu/drm/rockchip/rockchip_drm_psr.c
+@@ -189,12 +189,14 @@ EXPORT_SYMBOL(rockchip_drm_psr_flush_all);
+ int rockchip_drm_psr_register(struct drm_encoder *encoder,
+                       int (*psr_set)(struct drm_encoder *, bool enable))
+ {
+-      struct rockchip_drm_private *drm_drv = encoder->dev->dev_private;
++      struct rockchip_drm_private *drm_drv;
+       struct psr_drv *psr;
+ 
+       if (!encoder || !psr_set)
+               return -EINVAL;
+ 
++      drm_drv = encoder->dev->dev_private;
++
+       psr = kzalloc(sizeof(struct psr_drv), GFP_KERNEL);
+       if (!psr)
+               return -ENOMEM;
+diff --git a/drivers/gpu/drm/vc4/vc4_plane.c b/drivers/gpu/drm/vc4/vc4_plane.c
+index c6635f23918a..ae6254b0b1ae 100644
+--- a/drivers/gpu/drm/vc4/vc4_plane.c
++++ b/drivers/gpu/drm/vc4/vc4_plane.c
+@@ -321,6 +321,7 @@ static int vc4_plane_setup_clipping_and_scaling(struct drm_plane_state *state)
+               if (vc4_state->is_unity)
+                       vc4_state->x_scaling[0] = VC4_SCALING_PPF;
+       } else {
++              vc4_state->is_yuv = false;
+               vc4_state->x_scaling[1] = VC4_SCALING_NONE;
+               vc4_state->y_scaling[1] = VC4_SCALING_NONE;
+       }
+diff --git a/drivers/hwtracing/intel_th/msu.c b/drivers/hwtracing/intel_th/msu.c
+index d293e55553bd..ba7aaf421f36 100644
+--- a/drivers/hwtracing/intel_th/msu.c
++++ b/drivers/hwtracing/intel_th/msu.c
+@@ -1423,7 +1423,8 @@ nr_pages_store(struct device *dev, struct device_attribute *attr,
+               if (!end)
+                       break;
+ 
+-              len -= end - p;
++              /* consume the number and the following comma, hence +1 */
++              len -= end - p + 1;
+               p = end + 1;
+       } while (len);
+ 
+diff --git a/drivers/hwtracing/stm/policy.c b/drivers/hwtracing/stm/policy.c
+index 0910ec807187..4b9e44b227d8 100644
+--- a/drivers/hwtracing/stm/policy.c
++++ b/drivers/hwtracing/stm/policy.c
+@@ -440,10 +440,8 @@ stp_policy_make(struct config_group *group, const char *name)
+ 
+       stm->policy = kzalloc(sizeof(*stm->policy), GFP_KERNEL);
+       if (!stm->policy) {
+-              mutex_unlock(&stm->policy_mutex);
+-              stm_put_protocol(pdrv);
+-              stm_put_device(stm);
+-              return ERR_PTR(-ENOMEM);
++              ret = ERR_PTR(-ENOMEM);
++              goto unlock_policy;
+       }
+ 
+       config_group_init_type_name(&stm->policy->group, name,
+@@ -458,7 +456,11 @@ unlock_policy:
+       mutex_unlock(&stm->policy_mutex);
+ 
+       if (IS_ERR(ret)) {
+-              stm_put_protocol(stm->pdrv);
++              /*
++               * pdrv and stm->pdrv at this point can be quite different,
++               * and only one of them needs to be 'put'
++               */
++              stm_put_protocol(pdrv);
+               stm_put_device(stm);
+       }
+ 
+diff --git a/drivers/iio/adc/qcom-spmi-adc5.c b/drivers/iio/adc/qcom-spmi-adc5.c
+index f9af6b082916..6a866cc187f7 100644
+--- a/drivers/iio/adc/qcom-spmi-adc5.c
++++ b/drivers/iio/adc/qcom-spmi-adc5.c
+@@ -423,6 +423,7 @@ struct adc5_channels {
+       enum vadc_scale_fn_type scale_fn_type;
+ };
+ 
++/* In these definitions, _pre refers to an index into adc5_prescale_ratios. */
+ #define ADC5_CHAN(_dname, _type, _mask, _pre, _scale)                 \
+       {                                                               \
+               .datasheet_name = _dname,                               \
+@@ -443,63 +444,63 @@ struct adc5_channels {
+                 _pre, _scale)                                         \
+ 
+ static const struct adc5_channels adc5_chans_pmic[ADC5_MAX_CHANNEL] = {
+-      [ADC5_REF_GND]          = ADC5_CHAN_VOLT("ref_gnd", 1,
++      [ADC5_REF_GND]          = ADC5_CHAN_VOLT("ref_gnd", 0,
+                                       SCALE_HW_CALIB_DEFAULT)
+-      [ADC5_1P25VREF]         = ADC5_CHAN_VOLT("vref_1p25", 1,
++      [ADC5_1P25VREF]         = ADC5_CHAN_VOLT("vref_1p25", 0,
+                                       SCALE_HW_CALIB_DEFAULT)
+-      [ADC5_VPH_PWR]          = ADC5_CHAN_VOLT("vph_pwr", 3,
++      [ADC5_VPH_PWR]          = ADC5_CHAN_VOLT("vph_pwr", 1,
+                                       SCALE_HW_CALIB_DEFAULT)
+-      [ADC5_VBAT_SNS]         = ADC5_CHAN_VOLT("vbat_sns", 3,
++      [ADC5_VBAT_SNS]         = ADC5_CHAN_VOLT("vbat_sns", 1,
+                                       SCALE_HW_CALIB_DEFAULT)
+-      [ADC5_DIE_TEMP]         = ADC5_CHAN_TEMP("die_temp", 1,
++      [ADC5_DIE_TEMP]         = ADC5_CHAN_TEMP("die_temp", 0,
+                                       SCALE_HW_CALIB_PMIC_THERM)
+-      [ADC5_USB_IN_I]         = ADC5_CHAN_VOLT("usb_in_i_uv", 1,
++      [ADC5_USB_IN_I]         = ADC5_CHAN_VOLT("usb_in_i_uv", 0,
+                                       SCALE_HW_CALIB_DEFAULT)
+-      [ADC5_USB_IN_V_16]      = ADC5_CHAN_VOLT("usb_in_v_div_16", 16,
++      [ADC5_USB_IN_V_16]      = ADC5_CHAN_VOLT("usb_in_v_div_16", 8,
+                                       SCALE_HW_CALIB_DEFAULT)
+-      [ADC5_CHG_TEMP]         = ADC5_CHAN_TEMP("chg_temp", 1,
++      [ADC5_CHG_TEMP]         = ADC5_CHAN_TEMP("chg_temp", 0,
+                                       SCALE_HW_CALIB_PM5_CHG_TEMP)
+       /* Charger prescales SBUx and MID_CHG to fit within 1.8V upper unit */
+-      [ADC5_SBUx]             = ADC5_CHAN_VOLT("chg_sbux", 3,
++      [ADC5_SBUx]             = ADC5_CHAN_VOLT("chg_sbux", 1,
+                                       SCALE_HW_CALIB_DEFAULT)
+-      [ADC5_MID_CHG_DIV6]     = ADC5_CHAN_VOLT("chg_mid_chg", 6,
++      [ADC5_MID_CHG_DIV6]     = ADC5_CHAN_VOLT("chg_mid_chg", 3,
+                                       SCALE_HW_CALIB_DEFAULT)
+-      [ADC5_XO_THERM_100K_PU] = ADC5_CHAN_TEMP("xo_therm", 1,
++      [ADC5_XO_THERM_100K_PU] = ADC5_CHAN_TEMP("xo_therm", 0,
+                                       SCALE_HW_CALIB_XOTHERM)
+-      [ADC5_AMUX_THM1_100K_PU] = ADC5_CHAN_TEMP("amux_thm1_100k_pu", 1,
++      [ADC5_AMUX_THM1_100K_PU] = ADC5_CHAN_TEMP("amux_thm1_100k_pu", 0,
+                                       SCALE_HW_CALIB_THERM_100K_PULLUP)
+-      [ADC5_AMUX_THM2_100K_PU] = ADC5_CHAN_TEMP("amux_thm2_100k_pu", 1,
++      [ADC5_AMUX_THM2_100K_PU] = ADC5_CHAN_TEMP("amux_thm2_100k_pu", 0,
+                                       SCALE_HW_CALIB_THERM_100K_PULLUP)
+-      [ADC5_AMUX_THM3_100K_PU] = ADC5_CHAN_TEMP("amux_thm3_100k_pu", 1,
++      [ADC5_AMUX_THM3_100K_PU] = ADC5_CHAN_TEMP("amux_thm3_100k_pu", 0,
+                                       SCALE_HW_CALIB_THERM_100K_PULLUP)
+-      [ADC5_AMUX_THM2]        = ADC5_CHAN_TEMP("amux_thm2", 1,
++      [ADC5_AMUX_THM2]        = ADC5_CHAN_TEMP("amux_thm2", 0,
+                                       SCALE_HW_CALIB_PM5_SMB_TEMP)
+ };
+ 
+ static const struct adc5_channels adc5_chans_rev2[ADC5_MAX_CHANNEL] = {
+-      [ADC5_REF_GND]          = ADC5_CHAN_VOLT("ref_gnd", 1,
++      [ADC5_REF_GND]          = ADC5_CHAN_VOLT("ref_gnd", 0,
+                                       SCALE_HW_CALIB_DEFAULT)
+-      [ADC5_1P25VREF]         = ADC5_CHAN_VOLT("vref_1p25", 1,
++      [ADC5_1P25VREF]         = ADC5_CHAN_VOLT("vref_1p25", 0,
+                                       SCALE_HW_CALIB_DEFAULT)
+-      [ADC5_VPH_PWR]          = ADC5_CHAN_VOLT("vph_pwr", 3,
++      [ADC5_VPH_PWR]          = ADC5_CHAN_VOLT("vph_pwr", 1,
+                                       SCALE_HW_CALIB_DEFAULT)
+-      [ADC5_VBAT_SNS]         = ADC5_CHAN_VOLT("vbat_sns", 3,
++      [ADC5_VBAT_SNS]         = ADC5_CHAN_VOLT("vbat_sns", 1,
+                                       SCALE_HW_CALIB_DEFAULT)
+-      [ADC5_VCOIN]            = ADC5_CHAN_VOLT("vcoin", 3,
++      [ADC5_VCOIN]            = ADC5_CHAN_VOLT("vcoin", 1,
+                                       SCALE_HW_CALIB_DEFAULT)
+-      [ADC5_DIE_TEMP]         = ADC5_CHAN_TEMP("die_temp", 1,
++      [ADC5_DIE_TEMP]         = ADC5_CHAN_TEMP("die_temp", 0,
+                                       SCALE_HW_CALIB_PMIC_THERM)
+-      [ADC5_AMUX_THM1_100K_PU] = ADC5_CHAN_TEMP("amux_thm1_100k_pu", 1,
++      [ADC5_AMUX_THM1_100K_PU] = ADC5_CHAN_TEMP("amux_thm1_100k_pu", 0,
+                                       SCALE_HW_CALIB_THERM_100K_PULLUP)
+-      [ADC5_AMUX_THM2_100K_PU] = ADC5_CHAN_TEMP("amux_thm2_100k_pu", 1,
++      [ADC5_AMUX_THM2_100K_PU] = ADC5_CHAN_TEMP("amux_thm2_100k_pu", 0,
+                                       SCALE_HW_CALIB_THERM_100K_PULLUP)
+-      [ADC5_AMUX_THM3_100K_PU] = ADC5_CHAN_TEMP("amux_thm3_100k_pu", 1,
++      [ADC5_AMUX_THM3_100K_PU] = ADC5_CHAN_TEMP("amux_thm3_100k_pu", 0,
+                                       SCALE_HW_CALIB_THERM_100K_PULLUP)
+-      [ADC5_AMUX_THM4_100K_PU] = ADC5_CHAN_TEMP("amux_thm4_100k_pu", 1,
++      [ADC5_AMUX_THM4_100K_PU] = ADC5_CHAN_TEMP("amux_thm4_100k_pu", 0,
+                                       SCALE_HW_CALIB_THERM_100K_PULLUP)
+-      [ADC5_AMUX_THM5_100K_PU] = ADC5_CHAN_TEMP("amux_thm5_100k_pu", 1,
++      [ADC5_AMUX_THM5_100K_PU] = ADC5_CHAN_TEMP("amux_thm5_100k_pu", 0,
+                                       SCALE_HW_CALIB_THERM_100K_PULLUP)
+-      [ADC5_XO_THERM_100K_PU] = ADC5_CHAN_TEMP("xo_therm_100k_pu", 1,
++      [ADC5_XO_THERM_100K_PU] = ADC5_CHAN_TEMP("xo_therm_100k_pu", 0,
+                                       SCALE_HW_CALIB_THERM_100K_PULLUP)
+ };
+ 
+@@ -558,6 +559,9 @@ static int adc5_get_dt_channel_data(struct adc5_chip *adc,
+                       return ret;
+               }
+               prop->prescale = ret;
++      } else {
++              prop->prescale =
++                      adc->data->adc_chans[prop->channel].prescale_index;
+       }
+ 
+       ret = of_property_read_u32(node, "qcom,hw-settle-time", &value);
+diff --git a/drivers/iio/dac/ad5686.c b/drivers/iio/dac/ad5686.c
+index 0e134b13967a..eae740fceed9 100644
+--- a/drivers/iio/dac/ad5686.c
++++ b/drivers/iio/dac/ad5686.c
+@@ -124,7 +124,8 @@ static int ad5686_read_raw(struct iio_dev *indio_dev,
+               mutex_unlock(&indio_dev->mlock);
+               if (ret < 0)
+                       return ret;
+-              *val = ret;
++              *val = (ret >> chan->scan_type.shift) &
++                      GENMASK(chan->scan_type.realbits - 1, 0);
+               return IIO_VAL_INT;
+       case IIO_CHAN_INFO_SCALE:
+               *val = st->vref_mv;
+diff --git a/drivers/infiniband/core/iwcm.c b/drivers/infiniband/core/iwcm.c
+index ba668d49c751..476abc74178e 100644
+--- a/drivers/infiniband/core/iwcm.c
++++ b/drivers/infiniband/core/iwcm.c
+@@ -502,17 +502,21 @@ static void iw_cm_check_wildcard(struct sockaddr_storage *pm_addr,
+  */
+ static int iw_cm_map(struct iw_cm_id *cm_id, bool active)
+ {
++      const char *devname = dev_name(&cm_id->device->dev);
++      const char *ifname = cm_id->device->iwcm->ifname;
+       struct iwpm_dev_data pm_reg_msg;
+       struct iwpm_sa_data pm_msg;
+       int status;
+ 
++      if (strlen(devname) >= sizeof(pm_reg_msg.dev_name) ||
++          strlen(ifname) >= sizeof(pm_reg_msg.if_name))
++              return -EINVAL;
++
+       cm_id->m_local_addr = cm_id->local_addr;
+       cm_id->m_remote_addr = cm_id->remote_addr;
+ 
+-      memcpy(pm_reg_msg.dev_name, dev_name(&cm_id->device->dev),
+-             sizeof(pm_reg_msg.dev_name));
+-      memcpy(pm_reg_msg.if_name, cm_id->device->iwcm->ifname,
+-             sizeof(pm_reg_msg.if_name));
++      strncpy(pm_reg_msg.dev_name, devname, sizeof(pm_reg_msg.dev_name));
++      strncpy(pm_reg_msg.if_name, ifname, sizeof(pm_reg_msg.if_name));
+ 
+       if (iwpm_register_pid(&pm_reg_msg, RDMA_NL_IWCM) ||
+           !iwpm_valid_pid())
+diff --git a/drivers/infiniband/sw/rxe/rxe_resp.c b/drivers/infiniband/sw/rxe/rxe_resp.c
+index c962160292f4..f0438bc6df88 100644
+--- a/drivers/infiniband/sw/rxe/rxe_resp.c
++++ b/drivers/infiniband/sw/rxe/rxe_resp.c
+@@ -844,11 +844,16 @@ static enum resp_states do_complete(struct rxe_qp *qp,
+ 
+       memset(&cqe, 0, sizeof(cqe));
+ 
+-      wc->wr_id               = wqe->wr_id;
+-      wc->status              = qp->resp.status;
+-      wc->qp                  = &qp->ibqp;
++      if (qp->rcq->is_user) {
++              uwc->status             = qp->resp.status;
++              uwc->qp_num             = qp->ibqp.qp_num;
++              uwc->wr_id              = wqe->wr_id;
++      } else {
++              wc->status              = qp->resp.status;
++              wc->qp                  = &qp->ibqp;
++              wc->wr_id               = wqe->wr_id;
++      }
+ 
+-      /* fields after status are not required for errors */
+       if (wc->status == IB_WC_SUCCESS) {
+               wc->opcode = (pkt->mask & RXE_IMMDT_MASK &&
+                               pkt->mask & RXE_WRITE_MASK) ?
+diff --git a/drivers/infiniband/ulp/srpt/ib_srpt.c b/drivers/infiniband/ulp/srpt/ib_srpt.c
+index 2357aa727dcf..96c767324575 100644
+--- a/drivers/infiniband/ulp/srpt/ib_srpt.c
++++ b/drivers/infiniband/ulp/srpt/ib_srpt.c
+@@ -2010,6 +2010,14 @@ static void srpt_free_ch(struct kref *kref)
+       kfree_rcu(ch, rcu);
+ }
+ 
++/*
++ * Shut down the SCSI target session, tell the connection manager to
++ * disconnect the associated RDMA channel, transition the QP to the error
++ * state and remove the channel from the channel list. This function is
++ * typically called from inside srpt_zerolength_write_done(). Concurrent
++ * srpt_zerolength_write() calls from inside srpt_close_ch() are possible
++ * as long as the channel is on sport->nexus_list.
++ */
+ static void srpt_release_channel_work(struct work_struct *w)
+ {
+       struct srpt_rdma_ch *ch;
+@@ -2037,6 +2045,11 @@ static void srpt_release_channel_work(struct work_struct *w)
+       else
+               ib_destroy_cm_id(ch->ib_cm.cm_id);
+ 
++      sport = ch->sport;
++      mutex_lock(&sport->mutex);
++      list_del_rcu(&ch->list);
++      mutex_unlock(&sport->mutex);
++
+       srpt_destroy_ch_ib(ch);
+ 
+       srpt_free_ioctx_ring((struct srpt_ioctx **)ch->ioctx_ring,
+@@ -2047,11 +2060,6 @@ static void srpt_release_channel_work(struct work_struct *w)
+                            sdev, ch->rq_size,
+                            srp_max_req_size, DMA_FROM_DEVICE);
+ 
+-      sport = ch->sport;
+-      mutex_lock(&sport->mutex);
+-      list_del_rcu(&ch->list);
+-      mutex_unlock(&sport->mutex);
+-
+       wake_up(&sport->ch_releaseQ);
+ 
+       kref_put(&ch->kref, srpt_free_ch);
+diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
+index 41a4b8808802..f3afab82f3ee 100644
+--- a/drivers/iommu/intel-iommu.c
++++ b/drivers/iommu/intel-iommu.c
+@@ -2044,7 +2044,7 @@ static int domain_context_mapping_one(struct dmar_domain *domain,
+        * than default.  Unnecessary for PT mode.
+        */
+       if (translation != CONTEXT_TT_PASS_THROUGH) {
+-              for (agaw = domain->agaw; agaw != iommu->agaw; agaw--) {
++              for (agaw = domain->agaw; agaw > iommu->agaw; agaw--) {
+                       ret = -ENOMEM;
+                       pgd = phys_to_virt(dma_pte_addr(pgd));
+                       if (!dma_pte_present(pgd))
+@@ -2058,7 +2058,7 @@ static int domain_context_mapping_one(struct dmar_domain *domain,
+                       translation = CONTEXT_TT_MULTI_LEVEL;
+ 
+               context_set_address_root(context, virt_to_phys(pgd));
+-              context_set_address_width(context, iommu->agaw);
++              context_set_address_width(context, agaw);
+       } else {
+               /*
+                * In pass through mode, AW must be programmed to
+diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
+index 9038c302d5c2..44f180e47622 100644
+--- a/drivers/md/dm-table.c
++++ b/drivers/md/dm-table.c
+@@ -1927,6 +1927,9 @@ void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,
+        */
+       if (blk_queue_is_zoned(q))
+               blk_revalidate_disk_zones(t->md->disk);
++
++      /* Allow reads to exceed readahead limits */
++      q->backing_dev_info->io_pages = limits->max_sectors >> (PAGE_SHIFT - 9);
+ }
+ 
+ unsigned int dm_table_get_num_targets(struct dm_table *t)
+diff --git a/drivers/media/pci/cx23885/cx23885-core.c b/drivers/media/pci/cx23885/cx23885-core.c
+index 39804d830305..fd5c52b21436 100644
+--- a/drivers/media/pci/cx23885/cx23885-core.c
++++ b/drivers/media/pci/cx23885/cx23885-core.c
+@@ -23,6 +23,7 @@
+ #include <linux/moduleparam.h>
+ #include <linux/kmod.h>
+ #include <linux/kernel.h>
++#include <linux/pci.h>
+ #include <linux/slab.h>
+ #include <linux/interrupt.h>
+ #include <linux/delay.h>
+@@ -41,6 +42,18 @@ MODULE_AUTHOR("Steven Toth <st...@linuxtv.org>");
+ MODULE_LICENSE("GPL");
+ MODULE_VERSION(CX23885_VERSION);
+ 
++/*
++ * Some platforms have been found to require periodic resetting of the DMA
++ * engine. Ryzen and XEON platforms are known to be affected. The symptom
++ * encountered is "mpeg risc op code error". Only Ryzen platforms employ
++ * this workaround if the option equals 1. The workaround can be explicitly
++ * disabled for all platforms by setting to 0, the workaround can be forced
++ * on for any platform by setting to 2.
++ */
++static unsigned int dma_reset_workaround = 1;
++module_param(dma_reset_workaround, int, 0644);
++MODULE_PARM_DESC(dma_reset_workaround, "periodic RiSC dma engine reset; 0-force disable, 1-driver detect (default), 2-force enable");
++
+ static unsigned int debug;
+ module_param(debug, int, 0644);
+ MODULE_PARM_DESC(debug, "enable debug messages");
+@@ -603,8 +616,13 @@ static void cx23885_risc_disasm(struct cx23885_tsport *port,
+ 
+ static void cx23885_clear_bridge_error(struct cx23885_dev *dev)
+ {
+-      uint32_t reg1_val = cx_read(TC_REQ); /* read-only */
+-      uint32_t reg2_val = cx_read(TC_REQ_SET);
++      uint32_t reg1_val, reg2_val;
++
++      if (!dev->need_dma_reset)
++              return;
++
++      reg1_val = cx_read(TC_REQ); /* read-only */
++      reg2_val = cx_read(TC_REQ_SET);
+ 
+       if (reg1_val && reg2_val) {
+               cx_write(TC_REQ, reg1_val);
+@@ -2058,6 +2076,37 @@ void cx23885_gpio_enable(struct cx23885_dev *dev, u32 mask, int asoutput)
+       /* TODO: 23-19 */
+ }
+ 
++static struct {
++      int vendor, dev;
++} const broken_dev_id[] = {
++      /* According with
++       * https://openbenchmarking.org/system/1703021-RI-AMDZEN08075/Ryzen%207%201800X/lspci,
++       * 0x1451 is PCI ID for the IOMMU found on Ryzen
++       */
++      { PCI_VENDOR_ID_AMD, 0x1451 },
++};
++
++static bool cx23885_does_need_dma_reset(void)
++{
++      int i;
++      struct pci_dev *pdev = NULL;
++
++      if (dma_reset_workaround == 0)
++              return false;
++      else if (dma_reset_workaround == 2)
++              return true;
++
++      for (i = 0; i < ARRAY_SIZE(broken_dev_id); i++) {
++              pdev = pci_get_device(broken_dev_id[i].vendor,
++                                    broken_dev_id[i].dev, NULL);
++              if (pdev) {
++                      pci_dev_put(pdev);
++                      return true;
++              }
++      }
++      return false;
++}
++
+ static int cx23885_initdev(struct pci_dev *pci_dev,
+                          const struct pci_device_id *pci_id)
+ {
+@@ -2069,6 +2118,8 @@ static int cx23885_initdev(struct pci_dev *pci_dev,
+       if (NULL == dev)
+               return -ENOMEM;
+ 
++      dev->need_dma_reset = cx23885_does_need_dma_reset();
++
+       err = v4l2_device_register(&pci_dev->dev, &dev->v4l2_dev);
+       if (err < 0)
+               goto fail_free;
+diff --git a/drivers/media/pci/cx23885/cx23885.h b/drivers/media/pci/cx23885/cx23885.h
+index d54c7ee1ab21..cf965efabe66 100644
+--- a/drivers/media/pci/cx23885/cx23885.h
++++ b/drivers/media/pci/cx23885/cx23885.h
+@@ -451,6 +451,8 @@ struct cx23885_dev {
+       /* Analog raw audio */
+       struct cx23885_audio_dev   *audio_dev;
+ 
++      /* Does the system require periodic DMA resets? */
++      unsigned int            need_dma_reset:1;
+ };
+ 
+ static inline struct cx23885_dev *to_cx23885(struct v4l2_device *v4l2_dev)
+diff --git a/drivers/misc/genwqe/card_utils.c b/drivers/misc/genwqe/card_utils.c
+index 3fcb9a2fe1c9..efe2fb72d54b 100644
+--- a/drivers/misc/genwqe/card_utils.c
++++ b/drivers/misc/genwqe/card_utils.c
+@@ -215,7 +215,7 @@ u32 genwqe_crc32(u8 *buff, size_t len, u32 init)
+ void *__genwqe_alloc_consistent(struct genwqe_dev *cd, size_t size,
+                              dma_addr_t *dma_handle)
+ {
+-      if (get_order(size) > MAX_ORDER)
++      if (get_order(size) >= MAX_ORDER)
+               return NULL;
+ 
+       return dma_zalloc_coherent(&cd->pci_dev->dev, size, dma_handle,
+diff --git a/drivers/net/wireless/broadcom/b43/phy_common.c b/drivers/net/wireless/broadcom/b43/phy_common.c
+index 85f2ca989565..ef3ffa5ad466 100644
+--- a/drivers/net/wireless/broadcom/b43/phy_common.c
++++ b/drivers/net/wireless/broadcom/b43/phy_common.c
+@@ -616,7 +616,7 @@ struct b43_c32 b43_cordic(int theta)
+       u8 i;
+       s32 tmp;
+       s8 signx = 1;
+-      u32 angle = 0;
++      s32 angle = 0;
+       struct b43_c32 ret = { .i = 39797, .q = 0, };
+ 
+       while (theta > (180 << 16))
+diff --git a/drivers/nvdimm/pmem.c b/drivers/nvdimm/pmem.c
+index 0e39e3d1846f..d28418b05a04 100644
+--- a/drivers/nvdimm/pmem.c
++++ b/drivers/nvdimm/pmem.c
+@@ -309,8 +309,11 @@ static void pmem_release_queue(void *q)
+       blk_cleanup_queue(q);
+ }
+ 
+-static void pmem_freeze_queue(void *q)
++static void pmem_freeze_queue(struct percpu_ref *ref)
+ {
++      struct request_queue *q;
++
++      q = container_of(ref, typeof(*q), q_usage_counter);
+       blk_freeze_queue_start(q);
+ }
+ 
+@@ -402,6 +405,7 @@ static int pmem_attach_disk(struct device *dev,
+ 
+       pmem->pfn_flags = PFN_DEV;
+       pmem->pgmap.ref = &q->q_usage_counter;
++      pmem->pgmap.kill = pmem_freeze_queue;
+       if (is_nd_pfn(dev)) {
+               if (setup_pagemap_fsdax(dev, &pmem->pgmap))
+                       return -ENOMEM;
+@@ -427,13 +431,6 @@ static int pmem_attach_disk(struct device *dev,
+               memcpy(&bb_res, &nsio->res, sizeof(bb_res));
+       }
+ 
+-      /*
+-       * At release time the queue must be frozen before
+-       * devm_memremap_pages is unwound
+-       */
+-      if (devm_add_action_or_reset(dev, pmem_freeze_queue, q))
+-              return -ENOMEM;
+-
+       if (IS_ERR(addr))
+               return PTR_ERR(addr);
+       pmem->virt_addr = addr;
+diff --git a/drivers/of/base.c b/drivers/of/base.c
+index 09692c9b32a7..6d20b6dcf034 100644
+--- a/drivers/of/base.c
++++ b/drivers/of/base.c
+@@ -116,9 +116,6 @@ int __weak of_node_to_nid(struct device_node *np)
+ }
+ #endif
+ 
+-static struct device_node **phandle_cache;
+-static u32 phandle_cache_mask;
+-
+ /*
+  * Assumptions behind phandle_cache implementation:
+  *   - phandle property values are in a contiguous range of 1..n
+@@ -127,6 +124,66 @@ static u32 phandle_cache_mask;
+  *   - the phandle lookup overhead reduction provided by the cache
+  *     will likely be less
+  */
++
++static struct device_node **phandle_cache;
++static u32 phandle_cache_mask;
++
++/*
++ * Caller must hold devtree_lock.
++ */
++static void __of_free_phandle_cache(void)
++{
++      u32 cache_entries = phandle_cache_mask + 1;
++      u32 k;
++
++      if (!phandle_cache)
++              return;
++
++      for (k = 0; k < cache_entries; k++)
++              of_node_put(phandle_cache[k]);
++
++      kfree(phandle_cache);
++      phandle_cache = NULL;
++}
++
++int of_free_phandle_cache(void)
++{
++      unsigned long flags;
++
++      raw_spin_lock_irqsave(&devtree_lock, flags);
++
++      __of_free_phandle_cache();
++
++      raw_spin_unlock_irqrestore(&devtree_lock, flags);
++
++      return 0;
++}
++#if !defined(CONFIG_MODULES)
++late_initcall_sync(of_free_phandle_cache);
++#endif
++
++/*
++ * Caller must hold devtree_lock.
++ */
++void __of_free_phandle_cache_entry(phandle handle)
++{
++      phandle masked_handle;
++      struct device_node *np;
++
++      if (!handle)
++              return;
++
++      masked_handle = handle & phandle_cache_mask;
++
++      if (phandle_cache) {
++              np = phandle_cache[masked_handle];
++              if (np && handle == np->phandle) {
++                      of_node_put(np);
++                      phandle_cache[masked_handle] = NULL;
++              }
++      }
++}
++
+ void of_populate_phandle_cache(void)
+ {
+       unsigned long flags;
+@@ -136,8 +193,7 @@ void of_populate_phandle_cache(void)
+ 
+       raw_spin_lock_irqsave(&devtree_lock, flags);
+ 
+-      kfree(phandle_cache);
+-      phandle_cache = NULL;
++      __of_free_phandle_cache();
+ 
+       for_each_of_allnodes(np)
+               if (np->phandle && np->phandle != OF_PHANDLE_ILLEGAL)
+@@ -155,30 +211,15 @@ void of_populate_phandle_cache(void)
+               goto out;
+ 
+       for_each_of_allnodes(np)
+-              if (np->phandle && np->phandle != OF_PHANDLE_ILLEGAL)
++              if (np->phandle && np->phandle != OF_PHANDLE_ILLEGAL) {
++                      of_node_get(np);
+                       phandle_cache[np->phandle & phandle_cache_mask] = np;
++              }
+ 
+ out:
+       raw_spin_unlock_irqrestore(&devtree_lock, flags);
+ }
+ 
+-int of_free_phandle_cache(void)
+-{
+-      unsigned long flags;
+-
+-      raw_spin_lock_irqsave(&devtree_lock, flags);
+-
+-      kfree(phandle_cache);
+-      phandle_cache = NULL;
+-
+-      raw_spin_unlock_irqrestore(&devtree_lock, flags);
+-
+-      return 0;
+-}
+-#if !defined(CONFIG_MODULES)
+-late_initcall_sync(of_free_phandle_cache);
+-#endif
+-
+ void __init of_core_init(void)
+ {
+       struct device_node *np;
+@@ -1190,13 +1231,23 @@ struct device_node *of_find_node_by_phandle(phandle handle)
+               if (phandle_cache[masked_handle] &&
+                   handle == phandle_cache[masked_handle]->phandle)
+                       np = phandle_cache[masked_handle];
++              if (np && of_node_check_flag(np, OF_DETACHED)) {
++                      WARN_ON(1); /* did not uncache np on node removal */
++                      of_node_put(np);
++                      phandle_cache[masked_handle] = NULL;
++                      np = NULL;
++              }
+       }
+ 
+       if (!np) {
+               for_each_of_allnodes(np)
+-                      if (np->phandle == handle) {
+-                              if (phandle_cache)
++                      if (np->phandle == handle &&
++                          !of_node_check_flag(np, OF_DETACHED)) {
++                              if (phandle_cache) {
++                                      /* will put when removed from cache */
++                                      of_node_get(np);
+                                       phandle_cache[masked_handle] = np;
++                              }
+                               break;
+                       }
+       }
+diff --git a/drivers/of/dynamic.c b/drivers/of/dynamic.c
+index f4f8ed9b5454..ecea92f68c87 100644
+--- a/drivers/of/dynamic.c
++++ b/drivers/of/dynamic.c
+@@ -268,6 +268,9 @@ void __of_detach_node(struct device_node *np)
+       }
+ 
+       of_node_set_flag(np, OF_DETACHED);
++
++      /* race with of_find_node_by_phandle() prevented by devtree_lock */
++      __of_free_phandle_cache_entry(np->phandle);
+ }
+ 
+ /**
+diff --git a/drivers/of/of_private.h b/drivers/of/of_private.h
+index 5d1567025358..24786818e32e 100644
+--- a/drivers/of/of_private.h
++++ b/drivers/of/of_private.h
+@@ -84,6 +84,10 @@ static inline void __of_detach_node_sysfs(struct device_node *np) {}
+ int of_resolve_phandles(struct device_node *tree);
+ #endif
+ 
++#if defined(CONFIG_OF_DYNAMIC)
++void __of_free_phandle_cache_entry(phandle handle);
++#endif
++
+ #if defined(CONFIG_OF_OVERLAY)
+ void of_overlay_mutex_lock(void);
+ void of_overlay_mutex_unlock(void);
+diff --git a/drivers/pci/p2pdma.c b/drivers/pci/p2pdma.c
+index ae3c5b25dcc7..a2eb25271c96 100644
+--- a/drivers/pci/p2pdma.c
++++ b/drivers/pci/p2pdma.c
+@@ -82,10 +82,8 @@ static void pci_p2pdma_percpu_release(struct percpu_ref *ref)
+       complete_all(&p2p->devmap_ref_done);
+ }
+ 
+-static void pci_p2pdma_percpu_kill(void *data)
++static void pci_p2pdma_percpu_kill(struct percpu_ref *ref)
+ {
+-      struct percpu_ref *ref = data;
+-
+       /*
+        * pci_p2pdma_add_resource() may be called multiple times
+        * by a driver and may register the percpu_kill devm action multiple
+@@ -198,6 +196,7 @@ int pci_p2pdma_add_resource(struct pci_dev *pdev, int bar, size_t size,
+       pgmap->type = MEMORY_DEVICE_PCI_P2PDMA;
+       pgmap->pci_p2pdma_bus_offset = pci_bus_address(pdev, bar) -
+               pci_resource_start(pdev, bar);
++      pgmap->kill = pci_p2pdma_percpu_kill;
+ 
+       addr = devm_memremap_pages(&pdev->dev, pgmap);
+       if (IS_ERR(addr)) {
+@@ -211,11 +210,6 @@ int pci_p2pdma_add_resource(struct pci_dev *pdev, int bar, size_t size,
+       if (error)
+               goto pgmap_free;
+ 
+-      error = devm_add_action_or_reset(&pdev->dev, pci_p2pdma_percpu_kill,
+-                                        &pdev->p2pdma->devmap_ref);
+-      if (error)
+-              goto pgmap_free;
+-
+       pci_info(pdev, "added peer-to-peer DMA memory %pR\n",
+                &pgmap->res);
+ 
+diff --git a/drivers/pci/pci-driver.c b/drivers/pci/pci-driver.c
+index bef17c3fca67..33f3f475e5c6 100644
+--- a/drivers/pci/pci-driver.c
++++ b/drivers/pci/pci-driver.c
+@@ -1251,30 +1251,29 @@ static int pci_pm_runtime_suspend(struct device *dev)
+               return 0;
+       }
+ 
+-      if (!pm || !pm->runtime_suspend)
+-              return -ENOSYS;
+-
+       pci_dev->state_saved = false;
+-      error = pm->runtime_suspend(dev);
+-      if (error) {
++      if (pm && pm->runtime_suspend) {
++              error = pm->runtime_suspend(dev);
+               /*
+                * -EBUSY and -EAGAIN is used to request the runtime PM core
+                * to schedule a new suspend, so log the event only with debug
+                * log level.
+                */
+-              if (error == -EBUSY || error == -EAGAIN)
++              if (error == -EBUSY || error == -EAGAIN) {
+                       dev_dbg(dev, "can't suspend now (%pf returned %d)\n",
+                               pm->runtime_suspend, error);
+-              else
++                      return error;
++              } else if (error) {
+                       dev_err(dev, "can't suspend (%pf returned %d)\n",
+                               pm->runtime_suspend, error);
+-
+-              return error;
++                      return error;
++              }
+       }
+ 
+       pci_fixup_device(pci_fixup_suspend, pci_dev);
+ 
+-      if (!pci_dev->state_saved && pci_dev->current_state != PCI_D0
++      if (pm && pm->runtime_suspend
++          && !pci_dev->state_saved && pci_dev->current_state != PCI_D0
+           && pci_dev->current_state != PCI_UNKNOWN) {
+               WARN_ONCE(pci_dev->current_state != prev,
+                       "PCI PM: State of device not saved by %pF\n",
+@@ -1292,7 +1291,7 @@ static int pci_pm_runtime_suspend(struct device *dev)
+ 
+ static int pci_pm_runtime_resume(struct device *dev)
+ {
+-      int rc;
++      int rc = 0;
+       struct pci_dev *pci_dev = to_pci_dev(dev);
+       const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
+ 
+@@ -1306,14 +1305,12 @@ static int pci_pm_runtime_resume(struct device *dev)
+       if (!pci_dev->driver)
+               return 0;
+ 
+-      if (!pm || !pm->runtime_resume)
+-              return -ENOSYS;
+-
+       pci_fixup_device(pci_fixup_resume_early, pci_dev);
+       pci_enable_wake(pci_dev, PCI_D0, false);
+       pci_fixup_device(pci_fixup_resume, pci_dev);
+ 
+-      rc = pm->runtime_resume(dev);
++      if (pm && pm->runtime_resume)
++              rc = pm->runtime_resume(dev);
+ 
+       pci_dev->runtime_d3cold = false;
+ 
+diff --git a/drivers/perf/hisilicon/hisi_uncore_ddrc_pmu.c b/drivers/perf/hisilicon/hisi_uncore_ddrc_pmu.c
+index 1b10ea05a914..69372e2bc93c 100644
+--- a/drivers/perf/hisilicon/hisi_uncore_ddrc_pmu.c
++++ b/drivers/perf/hisilicon/hisi_uncore_ddrc_pmu.c
+@@ -30,8 +30,8 @@
+ #define DDRC_FLUX_RCMD          0x38c
+ #define DDRC_PRE_CMD            0x3c0
+ #define DDRC_ACT_CMD            0x3c4
+-#define DDRC_BNK_CHG            0x3c8
+ #define DDRC_RNK_CHG            0x3cc
++#define DDRC_RW_CHG             0x3d0
+ #define DDRC_EVENT_CTRL         0x6C0
+ #define DDRC_INT_MASK         0x6c8
+ #define DDRC_INT_STATUS               0x6cc
+@@ -51,7 +51,7 @@
+ 
+ static const u32 ddrc_reg_off[] = {
+       DDRC_FLUX_WR, DDRC_FLUX_RD, DDRC_FLUX_WCMD, DDRC_FLUX_RCMD,
+-      DDRC_PRE_CMD, DDRC_ACT_CMD, DDRC_BNK_CHG, DDRC_RNK_CHG
++      DDRC_PRE_CMD, DDRC_ACT_CMD, DDRC_RNK_CHG, DDRC_RW_CHG
+ };
+ 
+ /*
+diff --git a/drivers/power/supply/olpc_battery.c b/drivers/power/supply/olpc_battery.c
+index 6da79ae14860..5a97e42a3547 100644
+--- a/drivers/power/supply/olpc_battery.c
++++ b/drivers/power/supply/olpc_battery.c
+@@ -428,14 +428,14 @@ static int olpc_bat_get_property(struct power_supply *psy,
+               if (ret)
+                       return ret;
+ 
+-              val->intval = (s16)be16_to_cpu(ec_word) * 100 / 256;
++              val->intval = (s16)be16_to_cpu(ec_word) * 10 / 256;
+               break;
+       case POWER_SUPPLY_PROP_TEMP_AMBIENT:
+               ret = olpc_ec_cmd(EC_AMB_TEMP, NULL, 0, (void *)&ec_word, 2);
+               if (ret)
+                       return ret;
+ 
+-              val->intval = (int)be16_to_cpu(ec_word) * 100 / 256;
++              val->intval = (int)be16_to_cpu(ec_word) * 10 / 256;
+               break;
+       case POWER_SUPPLY_PROP_CHARGE_COUNTER:
+               ret = olpc_ec_cmd(EC_BAT_ACR, NULL, 0, (void *)&ec_word, 2);
+diff --git a/drivers/s390/scsi/zfcp_aux.c b/drivers/s390/scsi/zfcp_aux.c
+index 94f4d8fe85e0..d1b531fe9ada 100644
+--- a/drivers/s390/scsi/zfcp_aux.c
++++ b/drivers/s390/scsi/zfcp_aux.c
+@@ -275,16 +275,16 @@ static void zfcp_free_low_mem_buffers(struct zfcp_adapter *adapter)
+  */
+ int zfcp_status_read_refill(struct zfcp_adapter *adapter)
+ {
+-      while (atomic_read(&adapter->stat_miss) > 0)
++      while (atomic_add_unless(&adapter->stat_miss, -1, 0))
+               if (zfcp_fsf_status_read(adapter->qdio)) {
++                      atomic_inc(&adapter->stat_miss); /* undo add -1 */
+                       if (atomic_read(&adapter->stat_miss) >=
+                           adapter->stat_read_buf_num) {
+                               zfcp_erp_adapter_reopen(adapter, 0, "axsref1");
+                               return 1;
+                       }
+                       break;
+-              } else
+-                      atomic_dec(&adapter->stat_miss);
++              }
+       return 0;
+ }
+ 
+diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
+index b9e5cd79931a..462ed4ad21d2 100644
+--- a/drivers/scsi/lpfc/lpfc_sli.c
++++ b/drivers/scsi/lpfc/lpfc_sli.c
+@@ -14501,7 +14501,8 @@ lpfc_sli4_queue_alloc(struct lpfc_hba *phba, uint32_t page_size,
+                       hw_page_size))/hw_page_size;
+ 
+       /* If needed, Adjust page count to match the max the adapter supports */
+-      if (queue->page_count > phba->sli4_hba.pc_sli4_params.wqpcnt)
++      if (phba->sli4_hba.pc_sli4_params.wqpcnt &&
++          (queue->page_count > phba->sli4_hba.pc_sli4_params.wqpcnt))
+               queue->page_count = phba->sli4_hba.pc_sli4_params.wqpcnt;
+ 
+       INIT_LIST_HEAD(&queue->list);
+diff --git a/drivers/video/fbdev/pxafb.c b/drivers/video/fbdev/pxafb.c
+index bbed039617a4..d59c8a59f582 100644
+--- a/drivers/video/fbdev/pxafb.c
++++ b/drivers/video/fbdev/pxafb.c
+@@ -2234,10 +2234,8 @@ static struct pxafb_mach_info *of_pxafb_of_mach_info(struct device *dev)
+       if (!info)
+               return ERR_PTR(-ENOMEM);
+       ret = of_get_pxafb_mode_info(dev, info);
+-      if (ret) {
+-              kfree(info->modes);
++      if (ret)
+               return ERR_PTR(ret);
+-      }
+ 
+       /*
+        * On purpose, neither lccrX registers nor video memory size can be
+diff --git a/fs/ceph/caps.c b/fs/ceph/caps.c
+index f3496db4bb3e..a58666a3f8dd 100644
+--- a/fs/ceph/caps.c
++++ b/fs/ceph/caps.c
+@@ -3569,7 +3569,6 @@ retry:
+                       tcap->cap_id = t_cap_id;
+                       tcap->seq = t_seq - 1;
+                       tcap->issue_seq = t_seq - 1;
+-                      tcap->mseq = t_mseq;
+                       tcap->issued |= issued;
+                       tcap->implemented |= issued;
+                       if (cap == ci->i_auth_cap)
+diff --git a/fs/dlm/lock.c b/fs/dlm/lock.c
+index cc91963683de..a928ba008d7d 100644
+--- a/fs/dlm/lock.c
++++ b/fs/dlm/lock.c
+@@ -1209,6 +1209,7 @@ static int create_lkb(struct dlm_ls *ls, struct dlm_lkb **lkb_ret)
+ 
+       if (rv < 0) {
+               log_error(ls, "create_lkb idr error %d", rv);
++              dlm_free_lkb(lkb);
+               return rv;
+       }
+ 
+@@ -4179,6 +4180,7 @@ static int receive_convert(struct dlm_ls *ls, struct dlm_message *ms)
+                         (unsigned long long)lkb->lkb_recover_seq,
+                         ms->m_header.h_nodeid, ms->m_lkid);
+               error = -ENOENT;
++              dlm_put_lkb(lkb);
+               goto fail;
+       }
+ 
+@@ -4232,6 +4234,7 @@ static int receive_unlock(struct dlm_ls *ls, struct dlm_message *ms)
+                         lkb->lkb_id, lkb->lkb_remid,
+                         ms->m_header.h_nodeid, ms->m_lkid);
+               error = -ENOENT;
++              dlm_put_lkb(lkb);
+               goto fail;
+       }
+ 
+@@ -5792,20 +5795,20 @@ int dlm_user_request(struct dlm_ls *ls, struct dlm_user_args *ua,
+                       goto out;
+               }
+       }
+-
+-      /* After ua is attached to lkb it will be freed by dlm_free_lkb().
+-         When DLM_IFL_USER is set, the dlm knows that this is a userspace
+-         lock and that lkb_astparam is the dlm_user_args structure. */
+-
+       error = set_lock_args(mode, &ua->lksb, flags, namelen, timeout_cs,
+                             fake_astfn, ua, fake_bastfn, &args);
+-      lkb->lkb_flags |= DLM_IFL_USER;
+-
+       if (error) {
++              kfree(ua->lksb.sb_lvbptr);
++              ua->lksb.sb_lvbptr = NULL;
++              kfree(ua);
+               __put_lkb(ls, lkb);
+               goto out;
+       }
+ 
++      /* After ua is attached to lkb it will be freed by dlm_free_lkb().
++         When DLM_IFL_USER is set, the dlm knows that this is a userspace
++         lock and that lkb_astparam is the dlm_user_args structure. */
++      lkb->lkb_flags |= DLM_IFL_USER;
+       error = request_lock(ls, lkb, name, namelen, &args);
+ 
+       switch (error) {
+diff --git a/fs/dlm/lockspace.c b/fs/dlm/lockspace.c
+index 5ba94be006ee..6a1529e478f3 100644
+--- a/fs/dlm/lockspace.c
++++ b/fs/dlm/lockspace.c
+@@ -680,11 +680,11 @@ static int new_lockspace(const char *name, const char *cluster,
+       kfree(ls->ls_recover_buf);
+  out_lkbidr:
+       idr_destroy(&ls->ls_lkbidr);
++ out_rsbtbl:
+       for (i = 0; i < DLM_REMOVE_NAMES_MAX; i++) {
+               if (ls->ls_remove_names[i])
+                       kfree(ls->ls_remove_names[i]);
+       }
+- out_rsbtbl:
+       vfree(ls->ls_rsbtbl);
+  out_lsfree:
+       if (do_unreg)
+diff --git a/fs/gfs2/inode.c b/fs/gfs2/inode.c
+index 648f0ca1ad57..998051c4aea7 100644
+--- a/fs/gfs2/inode.c
++++ b/fs/gfs2/inode.c
+@@ -744,17 +744,19 @@ static int gfs2_create_inode(struct inode *dir, struct dentry *dentry,
+                              the gfs2 structures. */
+       if (default_acl) {
+               error = __gfs2_set_acl(inode, default_acl, ACL_TYPE_DEFAULT);
++              if (error)
++                      goto fail_gunlock3;
+               posix_acl_release(default_acl);
++              default_acl = NULL;
+       }
+       if (acl) {
+-              if (!error)
+-                      error = __gfs2_set_acl(inode, acl, ACL_TYPE_ACCESS);
++              error = __gfs2_set_acl(inode, acl, ACL_TYPE_ACCESS);
++              if (error)
++                      goto fail_gunlock3;
+               posix_acl_release(acl);
++              acl = NULL;
+       }
+ 
+-      if (error)
+-              goto fail_gunlock3;
+-
+       error = security_inode_init_security(&ip->i_inode, &dip->i_inode, name,
+                                            &gfs2_initxattrs, NULL);
+       if (error)
+@@ -789,10 +791,8 @@ fail_free_inode:
+       }
+       gfs2_rsqa_delete(ip, NULL);
+ fail_free_acls:
+-      if (default_acl)
+-              posix_acl_release(default_acl);
+-      if (acl)
+-              posix_acl_release(acl);
++      posix_acl_release(default_acl);
++      posix_acl_release(acl);
+ fail_gunlock:
+       gfs2_dir_no_add(&da);
+       gfs2_glock_dq_uninit(ghs);
+diff --git a/fs/gfs2/rgrp.c b/fs/gfs2/rgrp.c
+index b08a530433ad..8d7916570362 100644
+--- a/fs/gfs2/rgrp.c
++++ b/fs/gfs2/rgrp.c
+@@ -1780,9 +1780,9 @@ static int gfs2_rbm_find(struct gfs2_rbm *rbm, u8 state, u32 *minext,
+                       goto next_iter;
+               }
+               if (ret == -E2BIG) {
++                      n += rbm->bii - initial_bii;
+                       rbm->bii = 0;
+                       rbm->offset = 0;
+-                      n += (rbm->bii - initial_bii);
+                       goto res_covered_end_of_rgrp;
+               }
+               return ret;
+diff --git a/fs/lockd/clntproc.c b/fs/lockd/clntproc.c
+index d20b92f271c2..0a67dd4250e9 100644
+--- a/fs/lockd/clntproc.c
++++ b/fs/lockd/clntproc.c
+@@ -442,7 +442,7 @@ nlmclnt_test(struct nlm_rqst *req, struct file_lock *fl)
+                       fl->fl_start = req->a_res.lock.fl.fl_start;
+                       fl->fl_end = req->a_res.lock.fl.fl_end;
+                       fl->fl_type = req->a_res.lock.fl.fl_type;
+-                      fl->fl_pid = 0;
++                      fl->fl_pid = -req->a_res.lock.fl.fl_pid;
+                       break;
+               default:
+                       status = nlm_stat_to_errno(req->a_res.status);
+diff --git a/fs/lockd/xdr.c b/fs/lockd/xdr.c
+index 7147e4aebecc..9846f7e95282 100644
+--- a/fs/lockd/xdr.c
++++ b/fs/lockd/xdr.c
+@@ -127,7 +127,7 @@ nlm_decode_lock(__be32 *p, struct nlm_lock *lock)
+ 
+       locks_init_lock(fl);
+       fl->fl_owner = current->files;
+-      fl->fl_pid   = (pid_t)lock->svid;
++      fl->fl_pid   = current->tgid;
+       fl->fl_flags = FL_POSIX;
+       fl->fl_type  = F_RDLCK;         /* as good as anything else */
+       start = ntohl(*p++);
+@@ -269,7 +269,7 @@ nlmsvc_decode_shareargs(struct svc_rqst *rqstp, __be32 *p)
+       memset(lock, 0, sizeof(*lock));
+       locks_init_lock(&lock->fl);
+       lock->svid = ~(u32) 0;
+-      lock->fl.fl_pid = (pid_t)lock->svid;
++      lock->fl.fl_pid = current->tgid;
+ 
+       if (!(p = nlm_decode_cookie(p, &argp->cookie))
+        || !(p = xdr_decode_string_inplace(p, &lock->caller,
+diff --git a/fs/lockd/xdr4.c b/fs/lockd/xdr4.c
+index 7ed9edf9aed4..70154f376695 100644
+--- a/fs/lockd/xdr4.c
++++ b/fs/lockd/xdr4.c
+@@ -119,7 +119,7 @@ nlm4_decode_lock(__be32 *p, struct nlm_lock *lock)
+ 
+       locks_init_lock(fl);
+       fl->fl_owner = current->files;
+-      fl->fl_pid   = (pid_t)lock->svid;
++      fl->fl_pid   = current->tgid;
+       fl->fl_flags = FL_POSIX;
+       fl->fl_type  = F_RDLCK;         /* as good as anything else */
+       p = xdr_decode_hyper(p, &start);
+@@ -266,7 +266,7 @@ nlm4svc_decode_shareargs(struct svc_rqst *rqstp, __be32 *p)
+       memset(lock, 0, sizeof(*lock));
+       locks_init_lock(&lock->fl);
+       lock->svid = ~(u32) 0;
+-      lock->fl.fl_pid = (pid_t)lock->svid;
++      lock->fl.fl_pid = current->tgid;
+ 
+       if (!(p = nlm4_decode_cookie(p, &argp->cookie))
+        || !(p = xdr_decode_string_inplace(p, &lock->caller,
+diff --git a/fs/nfsd/nfs4proc.c b/fs/nfsd/nfs4proc.c
+index d505990dac7c..c364acbb6aba 100644
+--- a/fs/nfsd/nfs4proc.c
++++ b/fs/nfsd/nfs4proc.c
+@@ -1016,8 +1016,6 @@ nfsd4_write(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
+ 
+       nvecs = svc_fill_write_vector(rqstp, write->wr_pagelist,
+                                     &write->wr_head, write->wr_buflen);
+-      if (!nvecs)
+-              return nfserr_io;
+       WARN_ON_ONCE(nvecs > ARRAY_SIZE(rqstp->rq_vec));
+ 
+       status = nfsd_vfs_write(rqstp, &cstate->current_fh, filp,
+diff --git a/include/linux/hmm.h b/include/linux/hmm.h
+index c6fb869a81c0..ed89fbc525d2 100644
+--- a/include/linux/hmm.h
++++ b/include/linux/hmm.h
+@@ -512,8 +512,7 @@ struct hmm_devmem {
+  * enough and allocate struct page for it.
+  *
+  * The device driver can wrap the hmm_devmem struct inside a private device
+- * driver struct. The device driver must call hmm_devmem_remove() before the
+- * device goes away and before freeing the hmm_devmem struct memory.
++ * driver struct.
+  */
+ struct hmm_devmem *hmm_devmem_add(const struct hmm_devmem_ops *ops,
+                                 struct device *device,
+@@ -521,7 +520,6 @@ struct hmm_devmem *hmm_devmem_add(const struct hmm_devmem_ops *ops,
+ struct hmm_devmem *hmm_devmem_add_resource(const struct hmm_devmem_ops *ops,
+                                          struct device *device,
+                                          struct resource *res);
+-void hmm_devmem_remove(struct hmm_devmem *devmem);
+ 
+ /*
+  * hmm_devmem_page_set_drvdata - set per-page driver data field
+diff --git a/include/linux/memremap.h b/include/linux/memremap.h
+index 0ac69ddf5fc4..55db66b3716f 100644
+--- a/include/linux/memremap.h
++++ b/include/linux/memremap.h
+@@ -111,6 +111,7 @@ typedef void (*dev_page_free_t)(struct page *page, void *data);
+  * @altmap: pre-allocated/reserved memory for vmemmap allocations
+  * @res: physical address range covered by @ref
+  * @ref: reference count that pins the devm_memremap_pages() mapping
++ * @kill: callback to transition @ref to the dead state
+  * @dev: host device of the mapping for debug
+  * @data: private data pointer for page_free()
+  * @type: memory type: see MEMORY_* in memory_hotplug.h
+@@ -122,6 +123,7 @@ struct dev_pagemap {
+       bool altmap_valid;
+       struct resource res;
+       struct percpu_ref *ref;
++      void (*kill)(struct percpu_ref *ref);
+       struct device *dev;
+       void *data;
+       enum memory_type type;
+diff --git a/kernel/fork.c b/kernel/fork.c
+index e2a5156bc9c3..3c16bc490583 100644
+--- a/kernel/fork.c
++++ b/kernel/fork.c
+@@ -1837,8 +1837,6 @@ static __latent_entropy struct task_struct *copy_process(
+ 
+       posix_cpu_timers_init(p);
+ 
+-      p->start_time = ktime_get_ns();
+-      p->real_start_time = ktime_get_boot_ns();
+       p->io_context = NULL;
+       audit_set_context(p, NULL);
+       cgroup_fork(p);
+@@ -2004,6 +2002,17 @@ static __latent_entropy struct task_struct *copy_process(
+       if (retval)
+               goto bad_fork_free_pid;
+ 
++      /*
++       * From this point on we must avoid any synchronous user-space
++       * communication until we take the tasklist-lock. In particular, we do
++       * not want user-space to be able to predict the process start-time by
++       * stalling fork(2) after we recorded the start_time but before it is
++       * visible to the system.
++       */
++
++      p->start_time = ktime_get_ns();
++      p->real_start_time = ktime_get_boot_ns();
++
+       /*
+        * Make it visible to the rest of the system, but dont wake it up yet.
+        * Need tasklist lock for parent etc handling!
+diff --git a/kernel/memremap.c b/kernel/memremap.c
+index 9eced2cc9f94..3eef989ef035 100644
+--- a/kernel/memremap.c
++++ b/kernel/memremap.c
+@@ -88,23 +88,25 @@ static void devm_memremap_pages_release(void *data)
+       resource_size_t align_start, align_size;
+       unsigned long pfn;
+ 
++      pgmap->kill(pgmap->ref);
+       for_each_device_pfn(pfn, pgmap)
+               put_page(pfn_to_page(pfn));
+ 
+-      if (percpu_ref_tryget_live(pgmap->ref)) {
+-              dev_WARN(dev, "%s: page mapping is still live!\n", __func__);
+-              percpu_ref_put(pgmap->ref);
+-      }
+-
+       /* pages are dead and unused, undo the arch mapping */
+       align_start = res->start & ~(SECTION_SIZE - 1);
+       align_size = ALIGN(res->start + resource_size(res), SECTION_SIZE)
+               - align_start;
+ 
+       mem_hotplug_begin();
+-      arch_remove_memory(align_start, align_size, pgmap->altmap_valid ?
+-                      &pgmap->altmap : NULL);
+-      kasan_remove_zero_shadow(__va(align_start), align_size);
++      if (pgmap->type == MEMORY_DEVICE_PRIVATE) {
++              pfn = align_start >> PAGE_SHIFT;
++              __remove_pages(page_zone(pfn_to_page(pfn)), pfn,
++                              align_size >> PAGE_SHIFT, NULL);
++      } else {
++              arch_remove_memory(align_start, align_size,
++                              pgmap->altmap_valid ? &pgmap->altmap : NULL);
++              kasan_remove_zero_shadow(__va(align_start), align_size);
++      }
+       mem_hotplug_done();
+ 
+       untrack_pfn(NULL, PHYS_PFN(align_start), align_size);
+@@ -116,7 +118,7 @@ static void devm_memremap_pages_release(void *data)
+ /**
+  * devm_memremap_pages - remap and provide memmap backing for the given resource
+  * @dev: hosting device for @res
+- * @pgmap: pointer to a struct dev_pgmap
++ * @pgmap: pointer to a struct dev_pagemap
+  *
+  * Notes:
+  * 1/ At a minimum the res, ref and type members of @pgmap must be initialized
+@@ -125,11 +127,8 @@ static void devm_memremap_pages_release(void *data)
+  * 2/ The altmap field may optionally be initialized, in which case altmap_valid
+  *    must be set to true
+  *
+- * 3/ pgmap.ref must be 'live' on entry and 'dead' before devm_memunmap_pages()
+- *    time (or devm release event). The expected order of events is that ref has
+- *    been through percpu_ref_kill() before devm_memremap_pages_release(). The
+- *    wait for the completion of all references being dropped and
+- *    percpu_ref_exit() must occur after devm_memremap_pages_release().
++ * 3/ pgmap->ref must be 'live' on entry and will be killed at
++ *    devm_memremap_pages_release() time, or if this routine fails.
+  *
+  * 4/ res is expected to be a host memory range that could feasibly be
+  *    treated as a "System RAM" range, i.e. not a device mmio range, but
+@@ -145,6 +144,9 @@ void *devm_memremap_pages(struct device *dev, struct dev_pagemap *pgmap)
+       pgprot_t pgprot = PAGE_KERNEL;
+       int error, nid, is_ram;
+ 
++      if (!pgmap->ref || !pgmap->kill)
++              return ERR_PTR(-EINVAL);
++
+       align_start = res->start & ~(SECTION_SIZE - 1);
+       align_size = ALIGN(res->start + resource_size(res), SECTION_SIZE)
+               - align_start;
+@@ -167,18 +169,13 @@ void *devm_memremap_pages(struct device *dev, struct dev_pagemap *pgmap)
+       is_ram = region_intersects(align_start, align_size,
+               IORESOURCE_SYSTEM_RAM, IORES_DESC_NONE);
+ 
+-      if (is_ram == REGION_MIXED) {
+-              WARN_ONCE(1, "%s attempted on mixed region %pr\n",
+-                              __func__, res);
+-              return ERR_PTR(-ENXIO);
++      if (is_ram != REGION_DISJOINT) {
++              WARN_ONCE(1, "%s attempted on %s region %pr\n", __func__,
++                              is_ram == REGION_MIXED ? "mixed" : "ram", res);
++              error = -ENXIO;
++              goto err_array;
+       }
+ 
+-      if (is_ram == REGION_INTERSECTS)
+-              return __va(res->start);
+-
+-      if (!pgmap->ref)
+-              return ERR_PTR(-EINVAL);
+-
+       pgmap->dev = dev;
+ 
+       error = xa_err(xa_store_range(&pgmap_array, PHYS_PFN(res->start),
+@@ -196,17 +193,40 @@ void *devm_memremap_pages(struct device *dev, struct dev_pagemap *pgmap)
+               goto err_pfn_remap;
+ 
+       mem_hotplug_begin();
+-      error = kasan_add_zero_shadow(__va(align_start), align_size);
+-      if (error) {
+-              mem_hotplug_done();
+-              goto err_kasan;
++
++      /*
++       * For device private memory we call add_pages() as we only need to
++       * allocate and initialize struct page for the device memory. More-
++       * over the device memory is un-accessible thus we do not want to
++       * create a linear mapping for the memory like arch_add_memory()
++       * would do.
++       *
++       * For all other device memory types, which are accessible by
++       * the CPU, we do want the linear mapping and thus use
++       * arch_add_memory().
++       */
++      if (pgmap->type == MEMORY_DEVICE_PRIVATE) {
++              error = add_pages(nid, align_start >> PAGE_SHIFT,
++                              align_size >> PAGE_SHIFT, NULL, false);
++      } else {
++              error = kasan_add_zero_shadow(__va(align_start), align_size);
++              if (error) {
++                      mem_hotplug_done();
++                      goto err_kasan;
++              }
++
++              error = arch_add_memory(nid, align_start, align_size, altmap,
++                              false);
++      }
++
++      if (!error) {
++              struct zone *zone;
++
++              zone = &NODE_DATA(nid)->node_zones[ZONE_DEVICE];
++              move_pfn_range_to_zone(zone, align_start >> PAGE_SHIFT,
++                              align_size >> PAGE_SHIFT, altmap);
+       }
+ 
+-      error = arch_add_memory(nid, align_start, align_size, altmap, false);
+-      if (!error)
+-              move_pfn_range_to_zone(&NODE_DATA(nid)->node_zones[ZONE_DEVICE],
+-                                      align_start >> PAGE_SHIFT,
+-                                      align_size >> PAGE_SHIFT, altmap);
+       mem_hotplug_done();
+       if (error)
+               goto err_add_memory;
+@@ -220,7 +240,10 @@ void *devm_memremap_pages(struct device *dev, struct dev_pagemap *pgmap)
+                               align_size >> PAGE_SHIFT, pgmap);
+       percpu_ref_get_many(pgmap->ref, pfn_end(pgmap) - pfn_first(pgmap));
+ 
+-      devm_add_action(dev, devm_memremap_pages_release, pgmap);
++      error = devm_add_action_or_reset(dev, devm_memremap_pages_release,
++                      pgmap);
++      if (error)
++              return ERR_PTR(error);
+ 
+       return __va(res->start);
+ 
+@@ -231,9 +254,10 @@ void *devm_memremap_pages(struct device *dev, struct dev_pagemap *pgmap)
+  err_pfn_remap:
+       pgmap_array_delete(res);
+  err_array:
++      pgmap->kill(pgmap->ref);
+       return ERR_PTR(error);
+ }
+-EXPORT_SYMBOL(devm_memremap_pages);
++EXPORT_SYMBOL_GPL(devm_memremap_pages);
+ 
+ unsigned long vmem_altmap_offset(struct vmem_altmap *altmap)
+ {
+diff --git a/kernel/pid.c b/kernel/pid.c
+index b2f6c506035d..20881598bdfa 100644
+--- a/kernel/pid.c
++++ b/kernel/pid.c
+@@ -233,8 +233,10 @@ out_unlock:
+ 
+ out_free:
+       spin_lock_irq(&pidmap_lock);
+-      while (++i <= ns->level)
+-              idr_remove(&ns->idr, (pid->numbers + i)->nr);
++      while (++i <= ns->level) {
++              upid = pid->numbers + i;
++              idr_remove(&upid->ns->idr, upid->nr);
++      }
+ 
+       /* On failure to allocate the first pid, reset the state */
+       if (ns->pid_allocated == PIDNS_ADDING)
+diff --git a/kernel/rcu/srcutree.c b/kernel/rcu/srcutree.c
+index a8846ed7f352..a180abc8c925 100644
+--- a/kernel/rcu/srcutree.c
++++ b/kernel/rcu/srcutree.c
+@@ -451,10 +451,12 @@ static void srcu_gp_start(struct srcu_struct *sp)
+ 
+       lockdep_assert_held(&ACCESS_PRIVATE(sp, lock));
+       WARN_ON_ONCE(ULONG_CMP_GE(sp->srcu_gp_seq, sp->srcu_gp_seq_needed));
++      spin_lock_rcu_node(sdp);  /* Interrupts already disabled. */
+       rcu_segcblist_advance(&sdp->srcu_cblist,
+                             rcu_seq_current(&sp->srcu_gp_seq));
+       (void)rcu_segcblist_accelerate(&sdp->srcu_cblist,
+                                      rcu_seq_snap(&sp->srcu_gp_seq));
++      spin_unlock_rcu_node(sdp);  /* Interrupts remain disabled. */
+       smp_mb(); /* Order prior store to ->srcu_gp_seq_needed vs. GP start. */
+       rcu_seq_start(&sp->srcu_gp_seq);
+       state = rcu_seq_state(READ_ONCE(sp->srcu_gp_seq));
+diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
+index ac855b2f4774..e8f191ba3fe5 100644
+--- a/kernel/sched/fair.c
++++ b/kernel/sched/fair.c
+@@ -352,10 +352,9 @@ static inline void list_del_leaf_cfs_rq(struct cfs_rq *cfs_rq)
+       }
+ }
+ 
+-/* Iterate thr' all leaf cfs_rq's on a runqueue */
+-#define for_each_leaf_cfs_rq_safe(rq, cfs_rq, pos)                    \
+-      list_for_each_entry_safe(cfs_rq, pos, &rq->leaf_cfs_rq_list,    \
+-                               leaf_cfs_rq_list)
++/* Iterate through all leaf cfs_rq's on a runqueue: */
++#define for_each_leaf_cfs_rq(rq, cfs_rq) \
++      list_for_each_entry_rcu(cfs_rq, &rq->leaf_cfs_rq_list, leaf_cfs_rq_list)
+ 
+ /* Do the two (enqueued) entities belong to the same group ? */
+ static inline struct cfs_rq *
+@@ -447,8 +446,8 @@ static inline void list_del_leaf_cfs_rq(struct cfs_rq *cfs_rq)
+ {
+ }
+ 
+-#define for_each_leaf_cfs_rq_safe(rq, cfs_rq, pos)    \
+-              for (cfs_rq = &rq->cfs, pos = NULL; cfs_rq; cfs_rq = pos)
++#define for_each_leaf_cfs_rq(rq, cfs_rq)      \
++              for (cfs_rq = &rq->cfs; cfs_rq; cfs_rq = NULL)
+ 
+ static inline struct sched_entity *parent_entity(struct sched_entity *se)
+ {
+@@ -7387,27 +7386,10 @@ static inline bool others_have_blocked(struct rq *rq)
+ 
+ #ifdef CONFIG_FAIR_GROUP_SCHED
+ 
+-static inline bool cfs_rq_is_decayed(struct cfs_rq *cfs_rq)
+-{
+-      if (cfs_rq->load.weight)
+-              return false;
+-
+-      if (cfs_rq->avg.load_sum)
+-              return false;
+-
+-      if (cfs_rq->avg.util_sum)
+-              return false;
+-
+-      if (cfs_rq->avg.runnable_load_sum)
+-              return false;
+-
+-      return true;
+-}
+-
+ static void update_blocked_averages(int cpu)
+ {
+       struct rq *rq = cpu_rq(cpu);
+-      struct cfs_rq *cfs_rq, *pos;
++      struct cfs_rq *cfs_rq;
+       const struct sched_class *curr_class;
+       struct rq_flags rf;
+       bool done = true;
+@@ -7419,7 +7401,7 @@ static void update_blocked_averages(int cpu)
+        * Iterates the task_group tree in a bottom up fashion, see
+        * list_add_leaf_cfs_rq() for details.
+        */
+-      for_each_leaf_cfs_rq_safe(rq, cfs_rq, pos) {
++      for_each_leaf_cfs_rq(rq, cfs_rq) {
+               struct sched_entity *se;
+ 
+               /* throttled entities do not contribute to load */
+@@ -7434,13 +7416,6 @@ static void update_blocked_averages(int cpu)
+               if (se && !skip_blocked_update(se))
+                       update_load_avg(cfs_rq_of(se), se, 0);
+ 
+-              /*
+-               * There can be a lot of idle CPU cgroups.  Don't let fully
+-               * decayed cfs_rqs linger on the list.
+-               */
+-              if (cfs_rq_is_decayed(cfs_rq))
+-                      list_del_leaf_cfs_rq(cfs_rq);
+-
+               /* Don't need periodic decay once load/util_avg are null */
+               if (cfs_rq_has_blocked(cfs_rq))
+                       done = false;
+@@ -10289,10 +10264,10 @@ const struct sched_class fair_sched_class = {
+ #ifdef CONFIG_SCHED_DEBUG
+ void print_cfs_stats(struct seq_file *m, int cpu)
+ {
+-      struct cfs_rq *cfs_rq, *pos;
++      struct cfs_rq *cfs_rq;
+ 
+       rcu_read_lock();
+-      for_each_leaf_cfs_rq_safe(cpu_rq(cpu), cfs_rq, pos)
++      for_each_leaf_cfs_rq(cpu_rq(cpu), cfs_rq)
+               print_cfs_rq(m, cpu, cfs_rq);
+       rcu_read_unlock();
+ }
+diff --git a/lib/test_debug_virtual.c b/lib/test_debug_virtual.c
+index d5a06addeb27..bf864c73e462 100644
+--- a/lib/test_debug_virtual.c
++++ b/lib/test_debug_virtual.c
+@@ -5,6 +5,7 @@
+ #include <linux/vmalloc.h>
+ #include <linux/slab.h>
+ #include <linux/sizes.h>
++#include <linux/io.h>
+ 
+ #include <asm/page.h>
+ #ifdef CONFIG_MIPS
+diff --git a/mm/hmm.c b/mm/hmm.c
+index 90c34f3d1243..50fbaf80f95e 100644
+--- a/mm/hmm.c
++++ b/mm/hmm.c
+@@ -986,19 +986,16 @@ static void hmm_devmem_ref_exit(void *data)
+       struct hmm_devmem *devmem;
+ 
+       devmem = container_of(ref, struct hmm_devmem, ref);
++      wait_for_completion(&devmem->completion);
+       percpu_ref_exit(ref);
+-      devm_remove_action(devmem->device, &hmm_devmem_ref_exit, data);
+ }
+ 
+-static void hmm_devmem_ref_kill(void *data)
++static void hmm_devmem_ref_kill(struct percpu_ref *ref)
+ {
+-      struct percpu_ref *ref = data;
+       struct hmm_devmem *devmem;
+ 
+       devmem = container_of(ref, struct hmm_devmem, ref);
+       percpu_ref_kill(ref);
+-      wait_for_completion(&devmem->completion);
+-      devm_remove_action(devmem->device, &hmm_devmem_ref_kill, data);
+ }
+ 
+ static int hmm_devmem_fault(struct vm_area_struct *vma,
+@@ -1021,172 +1018,6 @@ static void hmm_devmem_free(struct page *page, void *data)
+       devmem->ops->free(devmem, page);
+ }
+ 
+-static DEFINE_MUTEX(hmm_devmem_lock);
+-static RADIX_TREE(hmm_devmem_radix, GFP_KERNEL);
+-
+-static void hmm_devmem_radix_release(struct resource *resource)
+-{
+-      resource_size_t key;
+-
+-      mutex_lock(&hmm_devmem_lock);
+-      for (key = resource->start;
+-           key <= resource->end;
+-           key += PA_SECTION_SIZE)
+-              radix_tree_delete(&hmm_devmem_radix, key >> PA_SECTION_SHIFT);
+-      mutex_unlock(&hmm_devmem_lock);
+-}
+-
+-static void hmm_devmem_release(struct device *dev, void *data)
+-{
+-      struct hmm_devmem *devmem = data;
+-      struct resource *resource = devmem->resource;
+-      unsigned long start_pfn, npages;
+-      struct zone *zone;
+-      struct page *page;
+-
+-      if (percpu_ref_tryget_live(&devmem->ref)) {
+-              dev_WARN(dev, "%s: page mapping is still live!\n", __func__);
+-              percpu_ref_put(&devmem->ref);
+-      }
+-
+-      /* pages are dead and unused, undo the arch mapping */
+-      start_pfn = (resource->start & ~(PA_SECTION_SIZE - 1)) >> PAGE_SHIFT;
+-      npages = ALIGN(resource_size(resource), PA_SECTION_SIZE) >> PAGE_SHIFT;
+-
+-      page = pfn_to_page(start_pfn);
+-      zone = page_zone(page);
+-
+-      mem_hotplug_begin();
+-      if (resource->desc == IORES_DESC_DEVICE_PRIVATE_MEMORY)
+-              __remove_pages(zone, start_pfn, npages, NULL);
+-      else
+-              arch_remove_memory(start_pfn << PAGE_SHIFT,
+-                                 npages << PAGE_SHIFT, NULL);
+-      mem_hotplug_done();
+-
+-      hmm_devmem_radix_release(resource);
+-}
+-
+-static int hmm_devmem_pages_create(struct hmm_devmem *devmem)
+-{
+-      resource_size_t key, align_start, align_size, align_end;
+-      struct device *device = devmem->device;
+-      int ret, nid, is_ram;
+-
+-      align_start = devmem->resource->start & ~(PA_SECTION_SIZE - 1);
+-      align_size = ALIGN(devmem->resource->start +
+-                         resource_size(devmem->resource),
+-                         PA_SECTION_SIZE) - align_start;
+-
+-      is_ram = region_intersects(align_start, align_size,
+-                                 IORESOURCE_SYSTEM_RAM,
+-                                 IORES_DESC_NONE);
+-      if (is_ram == REGION_MIXED) {
+-              WARN_ONCE(1, "%s attempted on mixed region %pr\n",
+-                              __func__, devmem->resource);
+-              return -ENXIO;
+-      }
+-      if (is_ram == REGION_INTERSECTS)
+-              return -ENXIO;
+-
+-      if (devmem->resource->desc == IORES_DESC_DEVICE_PUBLIC_MEMORY)
+-              devmem->pagemap.type = MEMORY_DEVICE_PUBLIC;
+-      else
+-              devmem->pagemap.type = MEMORY_DEVICE_PRIVATE;
+-
+-      devmem->pagemap.res = *devmem->resource;
+-      devmem->pagemap.page_fault = hmm_devmem_fault;
+-      devmem->pagemap.page_free = hmm_devmem_free;
+-      devmem->pagemap.dev = devmem->device;
+-      devmem->pagemap.ref = &devmem->ref;
+-      devmem->pagemap.data = devmem;
+-
+-      mutex_lock(&hmm_devmem_lock);
+-      align_end = align_start + align_size - 1;
+-      for (key = align_start; key <= align_end; key += PA_SECTION_SIZE) {
+-              struct hmm_devmem *dup;
+-
+-              dup = radix_tree_lookup(&hmm_devmem_radix,
+-                                      key >> PA_SECTION_SHIFT);
+-              if (dup) {
+-                      dev_err(device, "%s: collides with mapping for %s\n",
+-                              __func__, dev_name(dup->device));
+-                      mutex_unlock(&hmm_devmem_lock);
+-                      ret = -EBUSY;
+-                      goto error;
+-              }
+-              ret = radix_tree_insert(&hmm_devmem_radix,
+-                                      key >> PA_SECTION_SHIFT,
+-                                      devmem);
+-              if (ret) {
+-                      dev_err(device, "%s: failed: %d\n", __func__, ret);
+-                      mutex_unlock(&hmm_devmem_lock);
+-                      goto error_radix;
+-              }
+-      }
+-      mutex_unlock(&hmm_devmem_lock);
+-
+-      nid = dev_to_node(device);
+-      if (nid < 0)
+-              nid = numa_mem_id();
+-
+-      mem_hotplug_begin();
+-      /*
+-       * For device private memory we call add_pages() as we only need to
+-       * allocate and initialize struct page for the device memory. More-
+-       * over the device memory is un-accessible thus we do not want to
+-       * create a linear mapping for the memory like arch_add_memory()
+-       * would do.
+-       *
+-       * For device public memory, which is accesible by the CPU, we do
+-       * want the linear mapping and thus use arch_add_memory().
+-       */
+-      if (devmem->pagemap.type == MEMORY_DEVICE_PUBLIC)
+-              ret = arch_add_memory(nid, align_start, align_size, NULL,
+-                              false);
+-      else
+-              ret = add_pages(nid, align_start >> PAGE_SHIFT,
+-                              align_size >> PAGE_SHIFT, NULL, false);
+-      if (ret) {
+-              mem_hotplug_done();
+-              goto error_add_memory;
+-      }
+-      move_pfn_range_to_zone(&NODE_DATA(nid)->node_zones[ZONE_DEVICE],
+-                              align_start >> PAGE_SHIFT,
+-                              align_size >> PAGE_SHIFT, NULL);
+-      mem_hotplug_done();
+-
+-      /*
+-       * Initialization of the pages has been deferred until now in order
+-       * to allow us to do the work while not holding the hotplug lock.
+-       */
+-      memmap_init_zone_device(&NODE_DATA(nid)->node_zones[ZONE_DEVICE],
+-                              align_start >> PAGE_SHIFT,
+-                              align_size >> PAGE_SHIFT, &devmem->pagemap);
+-
+-      return 0;
+-
+-error_add_memory:
+-      untrack_pfn(NULL, PHYS_PFN(align_start), align_size);
+-error_radix:
+-      hmm_devmem_radix_release(devmem->resource);
+-error:
+-      return ret;
+-}
+-
+-static int hmm_devmem_match(struct device *dev, void *data, void *match_data)
+-{
+-      struct hmm_devmem *devmem = data;
+-
+-      return devmem->resource == match_data;
+-}
+-
+-static void hmm_devmem_pages_remove(struct hmm_devmem *devmem)
+-{
+-      devres_release(devmem->device, &hmm_devmem_release,
+-                     &hmm_devmem_match, devmem->resource);
+-}
+-
+ /*
+  * hmm_devmem_add() - hotplug ZONE_DEVICE memory for device memory
+  *
+@@ -1210,12 +1041,12 @@ struct hmm_devmem *hmm_devmem_add(const struct hmm_devmem_ops *ops,
+ {
+       struct hmm_devmem *devmem;
+       resource_size_t addr;
++      void *result;
+       int ret;
+ 
+       dev_pagemap_get_ops();
+ 
+-      devmem = devres_alloc_node(&hmm_devmem_release, sizeof(*devmem),
+-                                 GFP_KERNEL, dev_to_node(device));
++      devmem = devm_kzalloc(device, sizeof(*devmem), GFP_KERNEL);
+       if (!devmem)
+               return ERR_PTR(-ENOMEM);
+ 
+@@ -1229,11 +1060,11 @@ struct hmm_devmem *hmm_devmem_add(const struct hmm_devmem_ops *ops,
+       ret = percpu_ref_init(&devmem->ref, &hmm_devmem_ref_release,
+                             0, GFP_KERNEL);
+       if (ret)
+-              goto error_percpu_ref;
++              return ERR_PTR(ret);
+ 
+-      ret = devm_add_action(device, hmm_devmem_ref_exit, &devmem->ref);
++      ret = devm_add_action_or_reset(device, hmm_devmem_ref_exit, &devmem->ref);
+       if (ret)
+-              goto error_devm_add_action;
++              return ERR_PTR(ret);
+ 
+       size = ALIGN(size, PA_SECTION_SIZE);
+       addr = min((unsigned long)iomem_resource.end,
+@@ -1253,54 +1084,40 @@ struct hmm_devmem *hmm_devmem_add(const struct hmm_devmem_ops *ops,
+ 
+               devmem->resource = devm_request_mem_region(device, addr, size,
+                                                          dev_name(device));
+-              if (!devmem->resource) {
+-                      ret = -ENOMEM;
+-                      goto error_no_resource;
+-              }
++              if (!devmem->resource)
++                      return ERR_PTR(-ENOMEM);
+               break;
+       }
+-      if (!devmem->resource) {
+-              ret = -ERANGE;
+-              goto error_no_resource;
+-      }
++      if (!devmem->resource)
++              return ERR_PTR(-ERANGE);
+ 
+       devmem->resource->desc = IORES_DESC_DEVICE_PRIVATE_MEMORY;
+       devmem->pfn_first = devmem->resource->start >> PAGE_SHIFT;
+       devmem->pfn_last = devmem->pfn_first +
+                          (resource_size(devmem->resource) >> PAGE_SHIFT);
+ 
+-      ret = hmm_devmem_pages_create(devmem);
+-      if (ret)
+-              goto error_pages;
+-
+-      devres_add(device, devmem);
+-
+-      ret = devm_add_action(device, hmm_devmem_ref_kill, &devmem->ref);
+-      if (ret) {
+-              hmm_devmem_remove(devmem);
+-              return ERR_PTR(ret);
+-      }
++      devmem->pagemap.type = MEMORY_DEVICE_PRIVATE;
++      devmem->pagemap.res = *devmem->resource;
++      devmem->pagemap.page_fault = hmm_devmem_fault;
++      devmem->pagemap.page_free = hmm_devmem_free;
++      devmem->pagemap.altmap_valid = false;
++      devmem->pagemap.ref = &devmem->ref;
++      devmem->pagemap.data = devmem;
++      devmem->pagemap.kill = hmm_devmem_ref_kill;
+ 
++      result = devm_memremap_pages(devmem->device, &devmem->pagemap);
++      if (IS_ERR(result))
++              return result;
+       return devmem;
+-
+-error_pages:
+-      devm_release_mem_region(device, devmem->resource->start,
+-                              resource_size(devmem->resource));
+-error_no_resource:
+-error_devm_add_action:
+-      hmm_devmem_ref_kill(&devmem->ref);
+-      hmm_devmem_ref_exit(&devmem->ref);
+-error_percpu_ref:
+-      devres_free(devmem);
+-      return ERR_PTR(ret);
+ }
+-EXPORT_SYMBOL(hmm_devmem_add);
++EXPORT_SYMBOL_GPL(hmm_devmem_add);
+ 
+ struct hmm_devmem *hmm_devmem_add_resource(const struct hmm_devmem_ops *ops,
+                                          struct device *device,
+                                          struct resource *res)
+ {
+       struct hmm_devmem *devmem;
++      void *result;
+       int ret;
+ 
+       if (res->desc != IORES_DESC_DEVICE_PUBLIC_MEMORY)
+@@ -1308,8 +1125,7 @@ struct hmm_devmem *hmm_devmem_add_resource(const struct hmm_devmem_ops *ops,
+ 
+       dev_pagemap_get_ops();
+ 
+-      devmem = devres_alloc_node(&hmm_devmem_release, sizeof(*devmem),
+-                                 GFP_KERNEL, dev_to_node(device));
++      devmem = devm_kzalloc(device, sizeof(*devmem), GFP_KERNEL);
+       if (!devmem)
+               return ERR_PTR(-ENOMEM);
+ 
+@@ -1323,71 +1139,32 @@ struct hmm_devmem *hmm_devmem_add_resource(const struct hmm_devmem_ops *ops,
+       ret = percpu_ref_init(&devmem->ref, &hmm_devmem_ref_release,
+                             0, GFP_KERNEL);
+       if (ret)
+-              goto error_percpu_ref;
++              return ERR_PTR(ret);
+ 
+-      ret = devm_add_action(device, hmm_devmem_ref_exit, &devmem->ref);
++      ret = devm_add_action_or_reset(device, hmm_devmem_ref_exit,
++                      &devmem->ref);
+       if (ret)
+-              goto error_devm_add_action;
+-
++              return ERR_PTR(ret);
+ 
+       devmem->pfn_first = devmem->resource->start >> PAGE_SHIFT;
+       devmem->pfn_last = devmem->pfn_first +
+                          (resource_size(devmem->resource) >> PAGE_SHIFT);
+ 
+-      ret = hmm_devmem_pages_create(devmem);
+-      if (ret)
+-              goto error_devm_add_action;
+-
+-      devres_add(device, devmem);
+-
+-      ret = devm_add_action(device, hmm_devmem_ref_kill, &devmem->ref);
+-      if (ret) {
+-              hmm_devmem_remove(devmem);
+-              return ERR_PTR(ret);
+-      }
++      devmem->pagemap.type = MEMORY_DEVICE_PUBLIC;
++      devmem->pagemap.res = *devmem->resource;
++      devmem->pagemap.page_fault = hmm_devmem_fault;
++      devmem->pagemap.page_free = hmm_devmem_free;
++      devmem->pagemap.altmap_valid = false;
++      devmem->pagemap.ref = &devmem->ref;
++      devmem->pagemap.data = devmem;
++      devmem->pagemap.kill = hmm_devmem_ref_kill;
+ 
++      result = devm_memremap_pages(devmem->device, &devmem->pagemap);
++      if (IS_ERR(result))
++              return result;
+       return devmem;
+-
+-error_devm_add_action:
+-      hmm_devmem_ref_kill(&devmem->ref);
+-      hmm_devmem_ref_exit(&devmem->ref);
+-error_percpu_ref:
+-      devres_free(devmem);
+-      return ERR_PTR(ret);
+-}
+-EXPORT_SYMBOL(hmm_devmem_add_resource);
+-
+-/*
+- * hmm_devmem_remove() - remove device memory (kill and free ZONE_DEVICE)
+- *
+- * @devmem: hmm_devmem struct use to track and manage the ZONE_DEVICE memory
+- *
+- * This will hot-unplug memory that was hotplugged by hmm_devmem_add on behalf
+- * of the device driver. It will free struct page and remove the resource that
+- * reserved the physical address range for this device memory.
+- */
+-void hmm_devmem_remove(struct hmm_devmem *devmem)
+-{
+-      resource_size_t start, size;
+-      struct device *device;
+-      bool cdm = false;
+-
+-      if (!devmem)
+-              return;
+-
+-      device = devmem->device;
+-      start = devmem->resource->start;
+-      size = resource_size(devmem->resource);
+-
+-      cdm = devmem->resource->desc == IORES_DESC_DEVICE_PUBLIC_MEMORY;
+-      hmm_devmem_ref_kill(&devmem->ref);
+-      hmm_devmem_ref_exit(&devmem->ref);
+-      hmm_devmem_pages_remove(devmem);
+-
+-      if (!cdm)
+-              devm_release_mem_region(device, start, size);
+ }
+-EXPORT_SYMBOL(hmm_devmem_remove);
++EXPORT_SYMBOL_GPL(hmm_devmem_add_resource);
+ 
+ /*
+  * A device driver that wants to handle multiple devices memory through a
+diff --git a/mm/memcontrol.c b/mm/memcontrol.c
+index 6e1469b80cb7..7e6bf74ddb1e 100644
+--- a/mm/memcontrol.c
++++ b/mm/memcontrol.c
+@@ -1666,6 +1666,9 @@ enum oom_status {
+ 
+ static enum oom_status mem_cgroup_oom(struct mem_cgroup *memcg, gfp_t mask, int order)
+ {
++      enum oom_status ret;
++      bool locked;
++
+       if (order > PAGE_ALLOC_COSTLY_ORDER)
+               return OOM_SKIPPED;
+ 
+@@ -1700,10 +1703,23 @@ static enum oom_status mem_cgroup_oom(struct mem_cgroup *memcg, gfp_t mask, int
+               return OOM_ASYNC;
+       }
+ 
++      mem_cgroup_mark_under_oom(memcg);
++
++      locked = mem_cgroup_oom_trylock(memcg);
++
++      if (locked)
++              mem_cgroup_oom_notify(memcg);
++
++      mem_cgroup_unmark_under_oom(memcg);
+       if (mem_cgroup_out_of_memory(memcg, mask, order))
+-              return OOM_SUCCESS;
++              ret = OOM_SUCCESS;
++      else
++              ret = OOM_FAILED;
+ 
+-      return OOM_FAILED;
++      if (locked)
++              mem_cgroup_oom_unlock(memcg);
++
++      return ret;
+ }
+ 
+ /**
+diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
+index 2b2b3ccbbfb5..cea0880eadfb 100644
+--- a/mm/memory_hotplug.c
++++ b/mm/memory_hotplug.c
+@@ -34,6 +34,7 @@
+ #include <linux/hugetlb.h>
+ #include <linux/memblock.h>
+ #include <linux/compaction.h>
++#include <linux/rmap.h>
+ 
+ #include <asm/tlbflush.h>
+ 
+@@ -1369,6 +1370,21 @@ do_migrate_range(unsigned long start_pfn, unsigned long end_pfn)
+                       pfn = page_to_pfn(compound_head(page))
+                               + hpage_nr_pages(page) - 1;
+ 
++              /*
++               * HWPoison pages have elevated reference counts so the migration would
++               * fail on them. It also doesn't make any sense to migrate them in the
++               * first place. Still try to unmap such a page in case it is still mapped
++               * (e.g. current hwpoison implementation doesn't unmap KSM pages but keep
++               * the unmap as the catch all safety net).
++               */
++              if (PageHWPoison(page)) {
++                      if (WARN_ON(PageLRU(page)))
++                              isolate_lru_page(page);
++                      if (page_mapped(page))
++                              try_to_unmap(page, TTU_IGNORE_MLOCK | TTU_IGNORE_ACCESS);
++                      continue;
++              }
++
+               if (!get_page_unless_zero(page))
+                       continue;
+               /*
+diff --git a/mm/swapfile.c b/mm/swapfile.c
+index 8688ae65ef58..20d3c0f47a5f 100644
+--- a/mm/swapfile.c
++++ b/mm/swapfile.c
+@@ -2197,7 +2197,8 @@ int try_to_unuse(unsigned int type, bool frontswap,
+                */
+               if (PageSwapCache(page) &&
+                   likely(page_private(page) == entry.val) &&
+-                  !page_swapped(page))
++                  (!PageTransCompound(page) ||
++                   !swap_page_trans_huge_swapped(si, entry)))
+                       delete_from_swap_cache(compound_head(page));
+ 
+               /*
+diff --git a/net/9p/client.c b/net/9p/client.c
+index 2c9a17b9b46b..357214a51f13 100644
+--- a/net/9p/client.c
++++ b/net/9p/client.c
+@@ -181,6 +181,12 @@ static int parse_opts(char *opts, struct p9_client *clnt)
+                               ret = r;
+                               continue;
+                       }
++                      if (option < 4096) {
++                              p9_debug(P9_DEBUG_ERROR,
++                                       "msize should be at least 4k\n");
++                              ret = -EINVAL;
++                              continue;
++                      }
+                       clnt->msize = option;
+                       break;
+               case Opt_trans:
+@@ -983,10 +989,18 @@ static int p9_client_version(struct p9_client *c)
+       else if (!strncmp(version, "9P2000", 6))
+               c->proto_version = p9_proto_legacy;
+       else {
++              p9_debug(P9_DEBUG_ERROR,
++                       "server returned an unknown version: %s\n", version);
+               err = -EREMOTEIO;
+               goto error;
+       }
+ 
++      if (msize < 4096) {
++              p9_debug(P9_DEBUG_ERROR,
++                       "server returned a msize < 4096: %d\n", msize);
++              err = -EREMOTEIO;
++              goto error;
++      }
+       if (msize < c->msize)
+               c->msize = msize;
+ 
+@@ -1043,6 +1057,13 @@ struct p9_client *p9_client_create(const char *dev_name, char *options)
+       if (clnt->msize > clnt->trans_mod->maxsize)
+               clnt->msize = clnt->trans_mod->maxsize;
+ 
++      if (clnt->msize < 4096) {
++              p9_debug(P9_DEBUG_ERROR,
++                       "Please specify a msize of at least 4k\n");
++              err = -EINVAL;
++              goto free_client;
++      }
++
+       err = p9_client_version(clnt);
+       if (err)
+               goto close_trans;
+diff --git a/net/sunrpc/auth_gss/svcauth_gss.c b/net/sunrpc/auth_gss/svcauth_gss.c
+index 1ece4bc3eb8d..152790ed309c 100644
+--- a/net/sunrpc/auth_gss/svcauth_gss.c
++++ b/net/sunrpc/auth_gss/svcauth_gss.c
+@@ -1142,7 +1142,7 @@ static int svcauth_gss_legacy_init(struct svc_rqst *rqstp,
+       struct kvec *resv = &rqstp->rq_res.head[0];
+       struct rsi *rsip, rsikey;
+       int ret;
+-      struct sunrpc_net *sn = net_generic(rqstp->rq_xprt->xpt_net, sunrpc_net_id);
++      struct sunrpc_net *sn = net_generic(SVC_NET(rqstp), sunrpc_net_id);
+ 
+       memset(&rsikey, 0, sizeof(rsikey));
+       ret = gss_read_verf(gc, argv, authp,
+@@ -1253,7 +1253,7 @@ static int svcauth_gss_proxy_init(struct svc_rqst *rqstp,
+       uint64_t handle;
+       int status;
+       int ret;
+-      struct net *net = rqstp->rq_xprt->xpt_net;
++      struct net *net = SVC_NET(rqstp);
+       struct sunrpc_net *sn = net_generic(net, sunrpc_net_id);
+ 
+       memset(&ud, 0, sizeof(ud));
+@@ -1444,7 +1444,7 @@ svcauth_gss_accept(struct svc_rqst *rqstp, __be32 *authp)
+       __be32          *rpcstart;
+       __be32          *reject_stat = resv->iov_base + resv->iov_len;
+       int             ret;
+-      struct sunrpc_net *sn = net_generic(rqstp->rq_xprt->xpt_net, sunrpc_net_id);
++      struct sunrpc_net *sn = net_generic(SVC_NET(rqstp), sunrpc_net_id);
+ 
+       dprintk("RPC:       svcauth_gss: argv->iov_len = %zd\n",
+                       argv->iov_len);
+@@ -1734,7 +1734,7 @@ svcauth_gss_release(struct svc_rqst *rqstp)
+       struct rpc_gss_wire_cred *gc = &gsd->clcred;
+       struct xdr_buf *resbuf = &rqstp->rq_res;
+       int stat = -EINVAL;
+-      struct sunrpc_net *sn = net_generic(rqstp->rq_xprt->xpt_net, sunrpc_net_id);
++      struct sunrpc_net *sn = net_generic(SVC_NET(rqstp), sunrpc_net_id);
+ 
+       if (gc->gc_proc != RPC_GSS_PROC_DATA)
+               goto out;
+diff --git a/net/sunrpc/cache.c b/net/sunrpc/cache.c
+index f96345b1180e..12bb23b8e0c5 100644
+--- a/net/sunrpc/cache.c
++++ b/net/sunrpc/cache.c
+@@ -54,6 +54,11 @@ static void cache_init(struct cache_head *h, struct cache_detail *detail)
+       h->last_refresh = now;
+ }
+ 
++static void cache_fresh_locked(struct cache_head *head, time_t expiry,
++                              struct cache_detail *detail);
++static void cache_fresh_unlocked(struct cache_head *head,
++                              struct cache_detail *detail);
++
+ static struct cache_head *sunrpc_cache_find_rcu(struct cache_detail *detail,
+                                               struct cache_head *key,
+                                               int hash)
+@@ -100,6 +105,7 @@ static struct cache_head *sunrpc_cache_add_entry(struct cache_detail *detail,
+                       if (cache_is_expired(detail, tmp)) {
+                               hlist_del_init_rcu(&tmp->cache_list);
+                               detail->entries --;
++                              cache_fresh_locked(tmp, 0, detail);
+                               freeme = tmp;
+                               break;
+                       }
+@@ -115,8 +121,10 @@ static struct cache_head *sunrpc_cache_add_entry(struct cache_detail *detail,
+       cache_get(new);
+       spin_unlock(&detail->hash_lock);
+ 
+-      if (freeme)
++      if (freeme) {
++              cache_fresh_unlocked(freeme, detail);
+               cache_put(freeme, detail);
++      }
+       return new;
+ }
+ 
+diff --git a/net/sunrpc/xprtrdma/frwr_ops.c b/net/sunrpc/xprtrdma/frwr_ops.c
+index fc6378cc0c1c..20ced24cc61b 100644
+--- a/net/sunrpc/xprtrdma/frwr_ops.c
++++ b/net/sunrpc/xprtrdma/frwr_ops.c
+@@ -117,15 +117,15 @@ static void
+ frwr_mr_recycle_worker(struct work_struct *work)
+ {
+       struct rpcrdma_mr *mr = container_of(work, struct rpcrdma_mr, mr_recycle);
+-      enum rpcrdma_frwr_state state = mr->frwr.fr_state;
+       struct rpcrdma_xprt *r_xprt = mr->mr_xprt;
+ 
+       trace_xprtrdma_mr_recycle(mr);
+ 
+-      if (state != FRWR_FLUSHED_LI) {
++      if (mr->mr_dir != DMA_NONE) {
+               trace_xprtrdma_mr_unmap(mr);
+               ib_dma_unmap_sg(r_xprt->rx_ia.ri_device,
+                               mr->mr_sg, mr->mr_nents, mr->mr_dir);
++              mr->mr_dir = DMA_NONE;
+       }
+ 
+       spin_lock(&r_xprt->rx_buf.rb_mrlock);
+@@ -150,6 +150,8 @@ frwr_op_init_mr(struct rpcrdma_ia *ia, struct rpcrdma_mr *mr)
+       if (!mr->mr_sg)
+               goto out_list_err;
+ 
++      frwr->fr_state = FRWR_IS_INVALID;
++      mr->mr_dir = DMA_NONE;
+       INIT_LIST_HEAD(&mr->mr_list);
+       INIT_WORK(&mr->mr_recycle, frwr_mr_recycle_worker);
+       sg_init_table(mr->mr_sg, depth);
+diff --git a/net/sunrpc/xprtrdma/verbs.c b/net/sunrpc/xprtrdma/verbs.c
+index 3ddba94c939f..b9bc7f9f6bb9 100644
+--- a/net/sunrpc/xprtrdma/verbs.c
++++ b/net/sunrpc/xprtrdma/verbs.c
+@@ -1329,9 +1329,12 @@ rpcrdma_mr_unmap_and_put(struct rpcrdma_mr *mr)
+ {
+       struct rpcrdma_xprt *r_xprt = mr->mr_xprt;
+ 
+-      trace_xprtrdma_mr_unmap(mr);
+-      ib_dma_unmap_sg(r_xprt->rx_ia.ri_device,
+-                      mr->mr_sg, mr->mr_nents, mr->mr_dir);
++      if (mr->mr_dir != DMA_NONE) {
++              trace_xprtrdma_mr_unmap(mr);
++              ib_dma_unmap_sg(r_xprt->rx_ia.ri_device,
++                              mr->mr_sg, mr->mr_nents, mr->mr_dir);
++              mr->mr_dir = DMA_NONE;
++      }
+       __rpcrdma_mr_put(&r_xprt->rx_buf, mr);
+ }
+ 
+diff --git a/security/selinux/ss/policydb.c b/security/selinux/ss/policydb.c
+index f4eadd3f7350..b63ef865ce1e 100644
+--- a/security/selinux/ss/policydb.c
++++ b/security/selinux/ss/policydb.c
+@@ -2108,6 +2108,7 @@ static int ocontext_read(struct policydb *p, struct policydb_compat_info *info,
+ {
+       int i, j, rc;
+       u32 nel, len;
++      __be64 prefixbuf[1];
+       __le32 buf[3];
+       struct ocontext *l, *c;
+       u32 nodebuf[8];
+@@ -2217,21 +2218,30 @@ static int ocontext_read(struct policydb *p, struct policydb_compat_info *info,
+                                       goto out;
+                               break;
+                       }
+-                      case OCON_IBPKEY:
+-                              rc = next_entry(nodebuf, fp, sizeof(u32) * 4);
++                      case OCON_IBPKEY: {
++                              u32 pkey_lo, pkey_hi;
++
++                              rc = next_entry(prefixbuf, fp, sizeof(u64));
++                              if (rc)
++                                      goto out;
++
++                              /* we need to have subnet_prefix in CPU order */
++                              c->u.ibpkey.subnet_prefix = be64_to_cpu(prefixbuf[0]);
++
++                              rc = next_entry(buf, fp, sizeof(u32) * 2);
+                               if (rc)
+                                       goto out;
+ 
+-                              c->u.ibpkey.subnet_prefix = be64_to_cpu(*((__be64 *)nodebuf));
++                              pkey_lo = le32_to_cpu(buf[0]);
++                              pkey_hi = le32_to_cpu(buf[1]);
+ 
+-                              if (nodebuf[2] > 0xffff ||
+-                                  nodebuf[3] > 0xffff) {
++                              if (pkey_lo > U16_MAX || pkey_hi > U16_MAX) {
+                                       rc = -EINVAL;
+                                       goto out;
+                               }
+ 
+-                              c->u.ibpkey.low_pkey = le32_to_cpu(nodebuf[2]);
+-                              c->u.ibpkey.high_pkey = le32_to_cpu(nodebuf[3]);
++                              c->u.ibpkey.low_pkey  = pkey_lo;
++                              c->u.ibpkey.high_pkey = pkey_hi;
+ 
+                               rc = context_read_and_validate(&c->context[0],
+                                                              p,
+@@ -2239,7 +2249,10 @@ static int ocontext_read(struct policydb *p, struct policydb_compat_info *info,
+                               if (rc)
+                                       goto out;
+                               break;
+-                      case OCON_IBENDPORT:
++                      }
++                      case OCON_IBENDPORT: {
++                              u32 port;
++
+                               rc = next_entry(buf, fp, sizeof(u32) * 2);
+                               if (rc)
+                                       goto out;
+@@ -2249,12 +2262,13 @@ static int ocontext_read(struct policydb *p, struct policydb_compat_info *info,
+                               if (rc)
+                                       goto out;
+ 
+-                              if (buf[1] > 0xff || buf[1] == 0) {
++                              port = le32_to_cpu(buf[1]);
++                              if (port > U8_MAX || port == 0) {
+                                       rc = -EINVAL;
+                                       goto out;
+                               }
+ 
+-                              c->u.ibendport.port = le32_to_cpu(buf[1]);
++                              c->u.ibendport.port = port;
+ 
+                               rc = context_read_and_validate(&c->context[0],
+                                                              p,
+@@ -2262,7 +2276,8 @@ static int ocontext_read(struct policydb *p, struct policydb_compat_info *info,
+                               if (rc)
+                                       goto out;
+                               break;
+-                      }
++                      } /* end case */
++                      } /* end switch */
+               }
+       }
+       rc = 0;
+@@ -3105,6 +3120,7 @@ static int ocontext_write(struct policydb *p, struct policydb_compat_info *info,
+ {
+       unsigned int i, j, rc;
+       size_t nel, len;
++      __be64 prefixbuf[1];
+       __le32 buf[3];
+       u32 nodebuf[8];
+       struct ocontext *c;
+@@ -3192,12 +3208,17 @@ static int ocontext_write(struct policydb *p, struct policydb_compat_info *info,
+                                       return rc;
+                               break;
+                       case OCON_IBPKEY:
+-                              *((__be64 *)nodebuf) = cpu_to_be64(c->u.ibpkey.subnet_prefix);
++                              /* subnet_prefix is in CPU order */
++                              prefixbuf[0] = cpu_to_be64(c->u.ibpkey.subnet_prefix);
+ 
+-                              nodebuf[2] = cpu_to_le32(c->u.ibpkey.low_pkey);
+-                              nodebuf[3] = cpu_to_le32(c->u.ibpkey.high_pkey);
++                              rc = put_entry(prefixbuf, sizeof(u64), 1, fp);
++                              if (rc)
++                                      return rc;
++
++                              buf[0] = cpu_to_le32(c->u.ibpkey.low_pkey);
++                              buf[1] = cpu_to_le32(c->u.ibpkey.high_pkey);
+ 
+-                              rc = put_entry(nodebuf, sizeof(u32), 4, fp);
++                              rc = put_entry(buf, sizeof(u32), 2, fp);
+                               if (rc)
+                                       return rc;
+                               rc = context_write(p, &c->context[0], fp);
+diff --git a/sound/pci/cs46xx/dsp_spos.c b/sound/pci/cs46xx/dsp_spos.c
+index 598d140bb7cb..5fc497c6d738 100644
+--- a/sound/pci/cs46xx/dsp_spos.c
++++ b/sound/pci/cs46xx/dsp_spos.c
+@@ -903,6 +903,9 @@ int cs46xx_dsp_proc_done (struct snd_cs46xx *chip)
+       struct dsp_spos_instance * ins = chip->dsp_spos_instance;
+       int i;
+ 
++      if (!ins)
++              return 0;
++
+       snd_info_free_entry(ins->proc_sym_info_entry);
+       ins->proc_sym_info_entry = NULL;
+ 
+diff --git a/sound/usb/card.c b/sound/usb/card.c
+index a105947eaf55..746a72e23cf9 100644
+--- a/sound/usb/card.c
++++ b/sound/usb/card.c
+@@ -246,7 +246,7 @@ static int snd_usb_create_streams(struct snd_usb_audio *chip, int ctrlif)
+               h1 = snd_usb_find_csint_desc(host_iface->extra,
+                                                        host_iface->extralen,
+                                                        NULL, UAC_HEADER);
+-              if (!h1) {
++              if (!h1 || h1->bLength < sizeof(*h1)) {
+                       dev_err(&dev->dev, "cannot find UAC_HEADER\n");
+                       return -EINVAL;
+               }
+diff --git a/sound/usb/mixer.c b/sound/usb/mixer.c
+index c63c84b54969..e7d441d0e839 100644
+--- a/sound/usb/mixer.c
++++ b/sound/usb/mixer.c
+@@ -753,8 +753,9 @@ static int uac_mixer_unit_get_channels(struct mixer_build *state,
+                                      struct uac_mixer_unit_descriptor *desc)
+ {
+       int mu_channels;
++      void *c;
+ 
+-      if (desc->bLength < 11)
++      if (desc->bLength < sizeof(*desc))
+               return -EINVAL;
+       if (!desc->bNrInPins)
+               return -EINVAL;
+@@ -763,6 +764,8 @@ static int uac_mixer_unit_get_channels(struct mixer_build *state,
+       case UAC_VERSION_1:
+       case UAC_VERSION_2:
+       default:
++              if (desc->bLength < sizeof(*desc) + desc->bNrInPins + 1)
++                      return 0; /* no bmControls -> skip */
+               mu_channels = uac_mixer_unit_bNrChannels(desc);
+               break;
+       case UAC_VERSION_3:
+@@ -772,7 +775,11 @@ static int uac_mixer_unit_get_channels(struct mixer_build *state,
+       }
+ 
+       if (!mu_channels)
+-              return -EINVAL;
++              return 0;
++
++      c = uac_mixer_unit_bmControls(desc, state->mixer->protocol);
++      if (c - (void *)desc + (mu_channels - 1) / 8 >= desc->bLength)
++              return 0; /* no bmControls -> skip */
+ 
+       return mu_channels;
+ }
+@@ -944,7 +951,7 @@ static int check_input_term(struct mixer_build *state, int id,
+                               struct uac_mixer_unit_descriptor *d = p1;
+ 
+                               err = uac_mixer_unit_get_channels(state, d);
+-                              if (err < 0)
++                              if (err <= 0)
+                                       return err;
+ 
+                               term->channels = err;
+@@ -2068,11 +2075,15 @@ static int parse_audio_input_terminal(struct mixer_build *state, int unitid,
+ 
+       if (state->mixer->protocol == UAC_VERSION_2) {
+               struct uac2_input_terminal_descriptor *d_v2 = raw_desc;
++              if (d_v2->bLength < sizeof(*d_v2))
++                      return -EINVAL;
+               control = UAC2_TE_CONNECTOR;
+               term_id = d_v2->bTerminalID;
+               bmctls = le16_to_cpu(d_v2->bmControls);
+       } else if (state->mixer->protocol == UAC_VERSION_3) {
+               struct uac3_input_terminal_descriptor *d_v3 = raw_desc;
++              if (d_v3->bLength < sizeof(*d_v3))
++                      return -EINVAL;
+               control = UAC3_TE_INSERTION;
+               term_id = d_v3->bTerminalID;
+               bmctls = le32_to_cpu(d_v3->bmControls);
+@@ -2118,7 +2129,7 @@ static int parse_audio_mixer_unit(struct mixer_build *state, int unitid,
+               if (err < 0)
+                       continue;
+               /* no bmControls field (e.g. Maya44) -> ignore */
+-              if (desc->bLength <= 10 + input_pins)
++              if (!num_outs)
+                       continue;
+               err = check_input_term(state, desc->baSourceID[pin], &iterm);
+               if (err < 0)
+@@ -2314,7 +2325,7 @@ static int build_audio_procunit(struct mixer_build *state, int unitid,
+                               char *name)
+ {
+       struct uac_processing_unit_descriptor *desc = raw_desc;
+-      int num_ins = desc->bNrInPins;
++      int num_ins;
+       struct usb_mixer_elem_info *cval;
+       struct snd_kcontrol *kctl;
+       int i, err, nameid, type, len;
+@@ -2329,7 +2340,13 @@ static int build_audio_procunit(struct mixer_build *state, int unitid,
+               0, NULL, default_value_info
+       };
+ 
+-      if (desc->bLength < 13 || desc->bLength < 13 + num_ins ||
++      if (desc->bLength < 13) {
++              usb_audio_err(state->chip, "invalid %s descriptor (id %d)\n", name, unitid);
++              return -EINVAL;
++      }
++
++      num_ins = desc->bNrInPins;
++      if (desc->bLength < 13 + num_ins ||
+           desc->bLength < num_ins + uac_processing_unit_bControlSize(desc, state->mixer->protocol)) {
+              usb_audio_err(state->chip, "invalid %s descriptor (id %d)\n", name, unitid);
+               return -EINVAL;
+diff --git a/sound/usb/quirks-table.h b/sound/usb/quirks-table.h
+index 37fc0447c071..b345beb447bd 100644
+--- a/sound/usb/quirks-table.h
++++ b/sound/usb/quirks-table.h
+@@ -3326,6 +3326,9 @@ AU0828_DEVICE(0x2040, 0x7270, "Hauppauge", "HVR-950Q"),
+                                       }
+                               }
+                       },
++                      {
++                              .ifnum = -1
++                      },
+               }
+       }
+ },
+@@ -3369,6 +3372,9 @@ AU0828_DEVICE(0x2040, 0x7270, "Hauppauge", "HVR-950Q"),
+                                       }
+                               }
+                       },
++                      {
++                              .ifnum = -1
++                      },
+               }
+       }
+ },
+diff --git a/sound/usb/stream.c b/sound/usb/stream.c
+index 67cf849aa16b..d9e3de495c16 100644
+--- a/sound/usb/stream.c
++++ b/sound/usb/stream.c
+@@ -596,12 +596,8 @@ static int parse_uac_endpoint_attributes(struct snd_usb_audio *chip,
+               csep = snd_usb_find_desc(alts->extra, alts->extralen, NULL, USB_DT_CS_ENDPOINT);
+ 
+       if (!csep || csep->bLength < 7 ||
+-          csep->bDescriptorSubtype != UAC_EP_GENERAL) {
+-              usb_audio_warn(chip,
+-                             "%u:%d : no or invalid class specific endpoint 
descriptor\n",
+-                             iface_no, altsd->bAlternateSetting);
+-              return 0;
+-      }
++          csep->bDescriptorSubtype != UAC_EP_GENERAL)
++              goto error;
+ 
+       if (protocol == UAC_VERSION_1) {
+               attributes = csep->bmAttributes;
+@@ -609,6 +605,8 @@ static int parse_uac_endpoint_attributes(struct snd_usb_audio *chip,
+               struct uac2_iso_endpoint_descriptor *csep2 =
+                       (struct uac2_iso_endpoint_descriptor *) csep;
+ 
++              if (csep2->bLength < sizeof(*csep2))
++                      goto error;
+               attributes = csep->bmAttributes & UAC_EP_CS_ATTR_FILL_MAX;
+ 
+               /* emulate the endpoint attributes of a v1 device */
+@@ -618,12 +616,20 @@ static int parse_uac_endpoint_attributes(struct snd_usb_audio *chip,
+               struct uac3_iso_endpoint_descriptor *csep3 =
+                       (struct uac3_iso_endpoint_descriptor *) csep;
+ 
++              if (csep3->bLength < sizeof(*csep3))
++                      goto error;
+               /* emulate the endpoint attributes of a v1 device */
+               if (le32_to_cpu(csep3->bmControls) & UAC2_CONTROL_PITCH)
+                       attributes |= UAC_EP_CS_ATTR_PITCH_CONTROL;
+       }
+ 
+       return attributes;
++
++ error:
++      usb_audio_warn(chip,
++                     "%u:%d : no or invalid class specific endpoint 
descriptor\n",
++                     iface_no, altsd->bAlternateSetting);
++      return 0;
+ }
+ 
+ /* find an input terminal descriptor (either UAC1 or UAC2) with the given
+@@ -631,13 +637,17 @@ static int parse_uac_endpoint_attributes(struct snd_usb_audio *chip,
+  */
+ static void *
+ snd_usb_find_input_terminal_descriptor(struct usb_host_interface *ctrl_iface,
+-                                             int terminal_id)
++                                     int terminal_id, bool uac23)
+ {
+       struct uac2_input_terminal_descriptor *term = NULL;
++      size_t minlen = uac23 ? sizeof(struct uac2_input_terminal_descriptor) :
++              sizeof(struct uac_input_terminal_descriptor);
+ 
+       while ((term = snd_usb_find_csint_desc(ctrl_iface->extra,
+                                              ctrl_iface->extralen,
+                                              term, UAC_INPUT_TERMINAL))) {
++              if (term->bLength < minlen)
++                      continue;
+               if (term->bTerminalID == terminal_id)
+                       return term;
+       }
+@@ -655,7 +665,8 @@ snd_usb_find_output_terminal_descriptor(struct usb_host_interface *ctrl_iface,
+       while ((term = snd_usb_find_csint_desc(ctrl_iface->extra,
+                                              ctrl_iface->extralen,
+                                              term, UAC_OUTPUT_TERMINAL))) {
+-              if (term->bTerminalID == terminal_id)
++              if (term->bLength >= sizeof(*term) &&
++                  term->bTerminalID == terminal_id)
+                       return term;
+       }
+ 
+@@ -729,7 +740,8 @@ snd_usb_get_audioformat_uac12(struct snd_usb_audio *chip,
+               format = le16_to_cpu(as->wFormatTag); /* remember the format value */
+ 
+               iterm = snd_usb_find_input_terminal_descriptor(chip->ctrl_intf,
+-                                                           as->bTerminalLink);
++                                                             as->bTerminalLink,
++                                                             false);
+               if (iterm) {
+                       num_channels = iterm->bNrChannels;
+                       chconfig = le16_to_cpu(iterm->wChannelConfig);
+@@ -764,7 +776,8 @@ snd_usb_get_audioformat_uac12(struct snd_usb_audio *chip,
+                * to extract the clock
+                */
+               input_term = snd_usb_find_input_terminal_descriptor(chip->ctrl_intf,
+-                                                                  as->bTerminalLink);
++                                                                  as->bTerminalLink,
++                                                                  true);
+               if (input_term) {
+                       clock = input_term->bCSourceID;
+                       if (!chconfig && (num_channels == 
input_term->bNrChannels))
+@@ -998,7 +1011,8 @@ snd_usb_get_audioformat_uac3(struct snd_usb_audio *chip,
+        * to extract the clock
+        */
+       input_term = snd_usb_find_input_terminal_descriptor(chip->ctrl_intf,
+-                                                          as->bTerminalLink);
++                                                          as->bTerminalLink,
++                                                          true);
+       if (input_term) {
+               clock = input_term->bCSourceID;
+               goto found_clock;
+diff --git a/tools/testing/nvdimm/test/iomap.c b/tools/testing/nvdimm/test/iomap.c
+index ff9d3a5825e1..c6635fee27d8 100644
+--- a/tools/testing/nvdimm/test/iomap.c
++++ b/tools/testing/nvdimm/test/iomap.c
+@@ -104,16 +104,29 @@ void *__wrap_devm_memremap(struct device *dev, resource_size_t offset,
+ }
+ EXPORT_SYMBOL(__wrap_devm_memremap);
+ 
++static void nfit_test_kill(void *_pgmap)
++{
++      struct dev_pagemap *pgmap = _pgmap;
++
++      pgmap->kill(pgmap->ref);
++}
++
+ void *__wrap_devm_memremap_pages(struct device *dev, struct dev_pagemap *pgmap)
+ {
+       resource_size_t offset = pgmap->res.start;
+       struct nfit_test_resource *nfit_res = get_nfit_res(offset);
+ 
+-      if (nfit_res)
++      if (nfit_res) {
++              int rc;
++
++              rc = devm_add_action_or_reset(dev, nfit_test_kill, pgmap);
++              if (rc)
++                      return ERR_PTR(rc);
+               return nfit_res->buf + offset - nfit_res->res.start;
++      }
+       return devm_memremap_pages(dev, pgmap);
+ }
+-EXPORT_SYMBOL(__wrap_devm_memremap_pages);
++EXPORT_SYMBOL_GPL(__wrap_devm_memremap_pages);
+ 
+ pfn_t __wrap_phys_to_pfn_t(phys_addr_t addr, unsigned long flags)
+ {
+diff --git a/tools/testing/selftests/android/Makefile b/tools/testing/selftests/android/Makefile
+index d9a725478375..72c25a3cb658 100644
+--- a/tools/testing/selftests/android/Makefile
++++ b/tools/testing/selftests/android/Makefile
+@@ -6,7 +6,7 @@ TEST_PROGS := run.sh
+ 
+ include ../lib.mk
+ 
+-all: khdr
++all:
+       @for DIR in $(SUBDIRS); do              \
+               BUILD_TARGET=$(OUTPUT)/$$DIR;   \
+               mkdir $$BUILD_TARGET  -p;       \
+diff --git a/tools/testing/selftests/futex/functional/Makefile b/tools/testing/selftests/futex/functional/Makefile
+index ad1eeb14fda7..30996306cabc 100644
+--- a/tools/testing/selftests/futex/functional/Makefile
++++ b/tools/testing/selftests/futex/functional/Makefile
+@@ -19,6 +19,7 @@ TEST_GEN_FILES := \
+ TEST_PROGS := run.sh
+ 
+ top_srcdir = ../../../../..
++KSFT_KHDR_INSTALL := 1
+ include ../../lib.mk
+ 
+ $(TEST_GEN_FILES): $(HEADERS)
+diff --git a/tools/testing/selftests/gpio/Makefile b/tools/testing/selftests/gpio/Makefile
+index 46648427d537..07f572a1bd3f 100644
+--- a/tools/testing/selftests/gpio/Makefile
++++ b/tools/testing/selftests/gpio/Makefile
+@@ -10,8 +10,6 @@ TEST_PROGS_EXTENDED := gpio-mockup-chardev
+ GPIODIR := $(realpath ../../../gpio)
+ GPIOOBJ := gpio-utils.o
+ 
+-include ../lib.mk
+-
+ all: $(TEST_PROGS_EXTENDED)
+ 
+ override define CLEAN
+@@ -19,7 +17,9 @@ override define CLEAN
+       $(MAKE) -C $(GPIODIR) OUTPUT=$(GPIODIR)/ clean
+ endef
+ 
+-$(TEST_PROGS_EXTENDED):| khdr
++KSFT_KHDR_INSTALL := 1
++include ../lib.mk
++
+ $(TEST_PROGS_EXTENDED): $(GPIODIR)/$(GPIOOBJ)
+ 
+ $(GPIODIR)/$(GPIOOBJ):
+diff --git a/tools/testing/selftests/kvm/Makefile b/tools/testing/selftests/kvm/Makefile
+index 01a219229238..52bfe5e76907 100644
+--- a/tools/testing/selftests/kvm/Makefile
++++ b/tools/testing/selftests/kvm/Makefile
+@@ -1,6 +1,7 @@
+ all:
+ 
+ top_srcdir = ../../../..
++KSFT_KHDR_INSTALL := 1
+ UNAME_M := $(shell uname -m)
+ 
+ LIBKVM = lib/assert.c lib/elf.c lib/io.c lib/kvm_util.c lib/ucall.c lib/sparsebit.c
+@@ -44,7 +45,6 @@ $(OUTPUT)/libkvm.a: $(LIBKVM_OBJ)
+ 
+ all: $(STATIC_LIBS)
+ $(TEST_GEN_PROGS): $(STATIC_LIBS)
+-$(STATIC_LIBS):| khdr
+ 
+ cscope: include_paths = $(LINUX_TOOL_INCLUDE) $(LINUX_HDR_PATH) include lib ..
+ cscope:
+diff --git a/tools/testing/selftests/lib.mk b/tools/testing/selftests/lib.mk
+index 0a8e75886224..8b0f16409ed7 100644
+--- a/tools/testing/selftests/lib.mk
++++ b/tools/testing/selftests/lib.mk
+@@ -16,18 +16,18 @@ TEST_GEN_PROGS := $(patsubst %,$(OUTPUT)/%,$(TEST_GEN_PROGS))
+ TEST_GEN_PROGS_EXTENDED := $(patsubst %,$(OUTPUT)/%,$(TEST_GEN_PROGS_EXTENDED))
+ TEST_GEN_FILES := $(patsubst %,$(OUTPUT)/%,$(TEST_GEN_FILES))
+ 
++ifdef KSFT_KHDR_INSTALL
+ top_srcdir ?= ../../../..
+ include $(top_srcdir)/scripts/subarch.include
+ ARCH          ?= $(SUBARCH)
+ 
+-all: $(TEST_GEN_PROGS) $(TEST_GEN_PROGS_EXTENDED) $(TEST_GEN_FILES)
+-
+ .PHONY: khdr
+ khdr:
+       make ARCH=$(ARCH) -C $(top_srcdir) headers_install
+ 
+-ifdef KSFT_KHDR_INSTALL
+-$(TEST_GEN_PROGS) $(TEST_GEN_PROGS_EXTENDED) $(TEST_GEN_FILES):| khdr
++all: khdr $(TEST_GEN_PROGS) $(TEST_GEN_PROGS_EXTENDED) $(TEST_GEN_FILES)
++else
++all: $(TEST_GEN_PROGS) $(TEST_GEN_PROGS_EXTENDED) $(TEST_GEN_FILES)
+ endif
+ 
+ .ONESHELL:
+diff --git a/tools/testing/selftests/networking/timestamping/Makefile b/tools/testing/selftests/networking/timestamping/Makefile
+index 14cfcf006936..c46c0eefab9e 100644
+--- a/tools/testing/selftests/networking/timestamping/Makefile
++++ b/tools/testing/selftests/networking/timestamping/Makefile
+@@ -6,6 +6,7 @@ TEST_PROGS := hwtstamp_config rxtimestamp timestamping txtimestamp
+ all: $(TEST_PROGS)
+ 
+ top_srcdir = ../../../../..
++KSFT_KHDR_INSTALL := 1
+ include ../../lib.mk
+ 
+ clean:
+diff --git a/tools/testing/selftests/tc-testing/bpf/Makefile b/tools/testing/selftests/tc-testing/bpf/Makefile
+index dc92eb271d9a..be5a5e542804 100644
+--- a/tools/testing/selftests/tc-testing/bpf/Makefile
++++ b/tools/testing/selftests/tc-testing/bpf/Makefile
+@@ -4,6 +4,7 @@ APIDIR := ../../../../include/uapi
+ TEST_GEN_FILES = action.o
+ 
+ top_srcdir = ../../../../..
++KSFT_KHDR_INSTALL := 1
+ include ../../lib.mk
+ 
+ CLANG ?= clang
+diff --git a/tools/testing/selftests/vm/Makefile b/tools/testing/selftests/vm/Makefile
+index 6e67e726e5a5..e13eb6cc8901 100644
+--- a/tools/testing/selftests/vm/Makefile
++++ b/tools/testing/selftests/vm/Makefile
+@@ -25,6 +25,7 @@ TEST_GEN_FILES += virtual_address_range
+ 
+ TEST_PROGS := run_vmtests
+ 
++KSFT_KHDR_INSTALL := 1
+ include ../lib.mk
+ 
+ $(OUTPUT)/userfaultfd: LDLIBS += -lpthread