commit:     1df164e6f34617401f7debfc98b37eb847aaeb0c
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Sat May 25 15:15:39 2024 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Sat May 25 15:15:39 2024 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=1df164e6

Linux patch 5.15.160

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

 0000_README               |    4 +
 1159_linux-5.15.160.patch | 1084 +++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 1088 insertions(+)

diff --git a/0000_README b/0000_README
index a84cab69..4ceb2624 100644
--- a/0000_README
+++ b/0000_README
@@ -679,6 +679,10 @@ Patch:  1158_linux-5.15.159.patch
 From:   https://www.kernel.org
 Desc:   Linux 5.15.159
 
+Patch:  1159_linux-5.15.160.patch
+From:   https://www.kernel.org
+Desc:   Linux 5.15.160
+
 Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.

diff --git a/1159_linux-5.15.160.patch b/1159_linux-5.15.160.patch
new file mode 100644
index 00000000..6460b394
--- /dev/null
+++ b/1159_linux-5.15.160.patch
@@ -0,0 +1,1084 @@
+diff --git a/Documentation/admin-guide/hw-vuln/core-scheduling.rst b/Documentation/admin-guide/hw-vuln/core-scheduling.rst
+index 0febe458597c5..b9ab02325e8bd 100644
+--- a/Documentation/admin-guide/hw-vuln/core-scheduling.rst
++++ b/Documentation/admin-guide/hw-vuln/core-scheduling.rst
+@@ -66,8 +66,8 @@ arg4:
+     will be performed for all tasks in the task group of ``pid``.
+ 
+ arg5:
+-    userspace pointer to an unsigned long for storing the cookie returned by
+-    ``PR_SCHED_CORE_GET`` command. Should be 0 for all other commands.
++    userspace pointer to an unsigned long long for storing the cookie returned
++    by ``PR_SCHED_CORE_GET`` command. Should be 0 for all other commands.
+ 
+ In order for a process to push a cookie to, or pull a cookie from a process, it
+ is required to have the ptrace access mode: `PTRACE_MODE_READ_REALCREDS` to the
+diff --git a/Documentation/sphinx/kernel_include.py b/Documentation/sphinx/kernel_include.py
+index f523aa68a36b3..cf601bd058abe 100755
+--- a/Documentation/sphinx/kernel_include.py
++++ b/Documentation/sphinx/kernel_include.py
+@@ -94,7 +94,6 @@ class KernelInclude(Include):
+         # HINT: this is the only line I had to change / commented out:
+         #path = utils.relative_path(None, path)
+ 
+-        path = nodes.reprunicode(path)
+         encoding = self.options.get(
+             'encoding', self.state.document.settings.input_encoding)
+         e_handler=self.state.document.settings.input_encoding_error_handler
+diff --git a/Makefile b/Makefile
+index 5cbfe2be72dd4..bfc863d719783 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 5
+ PATCHLEVEL = 15
+-SUBLEVEL = 159
++SUBLEVEL = 160
+ EXTRAVERSION =
+ NAME = Trick or Treat
+ 
+diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
+index f781ba5d421d1..7bfc037022ad5 100644
+--- a/arch/x86/kvm/x86.c
++++ b/arch/x86/kvm/x86.c
+@@ -9134,13 +9134,20 @@ int kvm_check_nested_events(struct kvm_vcpu *vcpu)
+ 
+ static void kvm_inject_exception(struct kvm_vcpu *vcpu)
+ {
++      /*
++       * Suppress the error code if the vCPU is in Real Mode, as Real Mode
++       * exceptions don't report error codes.  The presence of an error code
++       * is carried with the exception and only stripped when the exception
++       * is injected as intercepted #PF VM-Exits for AMD's Paged Real Mode do
++       * report an error code despite the CPU being in Real Mode.
++       */
++      vcpu->arch.exception.has_error_code &= is_protmode(vcpu);
++
+       trace_kvm_inj_exception(vcpu->arch.exception.nr,
+                               vcpu->arch.exception.has_error_code,
+                               vcpu->arch.exception.error_code,
+                               vcpu->arch.exception.injected);
+ 
+-      if (vcpu->arch.exception.error_code && !is_protmode(vcpu))
+-              vcpu->arch.exception.error_code = false;
+       static_call(kvm_x86_queue_exception)(vcpu);
+ }
+ 
+diff --git a/drivers/android/binder.c b/drivers/android/binder.c
+index 11d65e23f1b61..3abd5619a9e6f 100644
+--- a/drivers/android/binder.c
++++ b/drivers/android/binder.c
+@@ -5165,7 +5165,7 @@ static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
+                       goto err;
+               break;
+       case BINDER_SET_MAX_THREADS: {
+-              int max_threads;
++              u32 max_threads;
+ 
+               if (copy_from_user(&max_threads, ubuf,
+                                  sizeof(max_threads))) {
+diff --git a/drivers/android/binder_internal.h b/drivers/android/binder_internal.h
+index 1ade9799c8d58..da9ead1cff6f5 100644
+--- a/drivers/android/binder_internal.h
++++ b/drivers/android/binder_internal.h
+@@ -420,7 +420,7 @@ struct binder_proc {
+       struct list_head todo;
+       struct binder_stats stats;
+       struct list_head delivered_death;
+-      int max_threads;
++      u32 max_threads;
+       int requested_threads;
+       int requested_threads_started;
+       int tmp_ref;
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
+index 439ea256ed252..c963b87014b69 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
+@@ -820,6 +820,9 @@ int amdgpu_ras_query_error_status(struct amdgpu_device *adev,
+       if (!obj)
+               return -EINVAL;
+ 
++      if (!info || info->head.block == AMDGPU_RAS_BLOCK_COUNT)
++              return -EINVAL;
++
+       switch (info->head.block) {
+       case AMDGPU_RAS_BLOCK__UMC:
+               if (adev->umc.ras_funcs &&
+diff --git a/drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c b/drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c
+index f5b7da0e64c07..c0b860ef2e382 100644
+--- a/drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c
++++ b/drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c
+@@ -939,7 +939,12 @@ static bool setup_dsc_config(
+       if (!is_dsc_possible)
+               goto done;
+ 
+-      dsc_cfg->num_slices_v = pic_height/slice_height;
++      if (slice_height > 0) {
++              dsc_cfg->num_slices_v = pic_height / slice_height;
++      } else {
++              is_dsc_possible = false;
++              goto done;
++      }
+ 
+       if (target_bandwidth_kbps > 0) {
+               is_dsc_possible = decide_dsc_target_bpp_x16(
+diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
+index ef8646e91f5da..4b8bb99b58eb2 100644
+--- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c
++++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
+@@ -2424,14 +2424,18 @@ static void umac_enable_set(struct bcmgenet_priv *priv, u32 mask, bool enable)
+ {
+       u32 reg;
+ 
++      spin_lock_bh(&priv->reg_lock);
+       reg = bcmgenet_umac_readl(priv, UMAC_CMD);
+-      if (reg & CMD_SW_RESET)
++      if (reg & CMD_SW_RESET) {
++              spin_unlock_bh(&priv->reg_lock);
+               return;
++      }
+       if (enable)
+               reg |= mask;
+       else
+               reg &= ~mask;
+       bcmgenet_umac_writel(priv, reg, UMAC_CMD);
++      spin_unlock_bh(&priv->reg_lock);
+ 
+       /* UniMAC stops on a packet boundary, wait for a full-size packet
+        * to be processed
+@@ -2447,8 +2451,10 @@ static void reset_umac(struct bcmgenet_priv *priv)
+       udelay(10);
+ 
+       /* issue soft reset and disable MAC while updating its registers */
++      spin_lock_bh(&priv->reg_lock);
+       bcmgenet_umac_writel(priv, CMD_SW_RESET, UMAC_CMD);
+       udelay(2);
++      spin_unlock_bh(&priv->reg_lock);
+ }
+ 
+ static void bcmgenet_intr_disable(struct bcmgenet_priv *priv)
+@@ -3576,16 +3582,19 @@ static void bcmgenet_set_rx_mode(struct net_device *dev)
+        * 3. The number of filters needed exceeds the number filters
+        *    supported by the hardware.
+       */
++      spin_lock(&priv->reg_lock);
+       reg = bcmgenet_umac_readl(priv, UMAC_CMD);
+       if ((dev->flags & (IFF_PROMISC | IFF_ALLMULTI)) ||
+           (nfilter > MAX_MDF_FILTER)) {
+               reg |= CMD_PROMISC;
+               bcmgenet_umac_writel(priv, reg, UMAC_CMD);
++              spin_unlock(&priv->reg_lock);
+               bcmgenet_umac_writel(priv, 0, UMAC_MDF_CTRL);
+               return;
+       } else {
+               reg &= ~CMD_PROMISC;
+               bcmgenet_umac_writel(priv, reg, UMAC_CMD);
++              spin_unlock(&priv->reg_lock);
+       }
+ 
+       /* update MDF filter */
+@@ -3979,6 +3988,7 @@ static int bcmgenet_probe(struct platform_device *pdev)
+               goto err;
+       }
+ 
++      spin_lock_init(&priv->reg_lock);
+       spin_lock_init(&priv->lock);
+ 
+       SET_NETDEV_DEV(dev, &pdev->dev);
+diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.h b/drivers/net/ethernet/broadcom/genet/bcmgenet.h
+index 0eeb304a42633..34a3c448d44f3 100644
+--- a/drivers/net/ethernet/broadcom/genet/bcmgenet.h
++++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.h
+@@ -572,6 +572,8 @@ struct bcmgenet_rxnfc_rule {
+ /* device context */
+ struct bcmgenet_priv {
+       void __iomem *base;
++      /* reg_lock: lock to serialize access to shared registers */
++      spinlock_t reg_lock;
+       enum bcmgenet_version version;
+       struct net_device *dev;
+ 
+diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet_wol.c b/drivers/net/ethernet/broadcom/genet/bcmgenet_wol.c
+index f55d9d9c01a85..38d41028e98a0 100644
+--- a/drivers/net/ethernet/broadcom/genet/bcmgenet_wol.c
++++ b/drivers/net/ethernet/broadcom/genet/bcmgenet_wol.c
+@@ -133,6 +133,7 @@ int bcmgenet_wol_power_down_cfg(struct bcmgenet_priv *priv,
+       }
+ 
+       /* Can't suspend with WoL if MAC is still in reset */
++      spin_lock_bh(&priv->reg_lock);
+       reg = bcmgenet_umac_readl(priv, UMAC_CMD);
+       if (reg & CMD_SW_RESET)
+               reg &= ~CMD_SW_RESET;
+@@ -140,6 +141,7 @@ int bcmgenet_wol_power_down_cfg(struct bcmgenet_priv *priv,
+       /* disable RX */
+       reg &= ~CMD_RX_EN;
+       bcmgenet_umac_writel(priv, reg, UMAC_CMD);
++      spin_unlock_bh(&priv->reg_lock);
+       mdelay(10);
+ 
+       if (priv->wolopts & (WAKE_MAGIC | WAKE_MAGICSECURE)) {
+@@ -185,6 +187,7 @@ int bcmgenet_wol_power_down_cfg(struct bcmgenet_priv *priv,
+       }
+ 
+       /* Enable CRC forward */
++      spin_lock_bh(&priv->reg_lock);
+       reg = bcmgenet_umac_readl(priv, UMAC_CMD);
+       priv->crc_fwd_en = 1;
+       reg |= CMD_CRC_FWD;
+@@ -192,6 +195,7 @@ int bcmgenet_wol_power_down_cfg(struct bcmgenet_priv *priv,
+       /* Receiver must be enabled for WOL MP detection */
+       reg |= CMD_RX_EN;
+       bcmgenet_umac_writel(priv, reg, UMAC_CMD);
++      spin_unlock_bh(&priv->reg_lock);
+ 
+       reg = UMAC_IRQ_MPD_R;
+       if (hfb_enable)
+@@ -238,7 +242,9 @@ void bcmgenet_wol_power_up_cfg(struct bcmgenet_priv *priv,
+       }
+ 
+       /* Disable CRC Forward */
++      spin_lock_bh(&priv->reg_lock);
+       reg = bcmgenet_umac_readl(priv, UMAC_CMD);
+       reg &= ~CMD_CRC_FWD;
+       bcmgenet_umac_writel(priv, reg, UMAC_CMD);
++      spin_unlock_bh(&priv->reg_lock);
+ }
+diff --git a/drivers/net/ethernet/broadcom/genet/bcmmii.c b/drivers/net/ethernet/broadcom/genet/bcmmii.c
+index 51f6c94e919e8..8c743e67d9f36 100644
+--- a/drivers/net/ethernet/broadcom/genet/bcmmii.c
++++ b/drivers/net/ethernet/broadcom/genet/bcmmii.c
+@@ -91,6 +91,7 @@ void bcmgenet_mii_setup(struct net_device *dev)
+               reg |= RGMII_LINK;
+               bcmgenet_ext_writel(priv, reg, EXT_RGMII_OOB_CTRL);
+ 
++              spin_lock_bh(&priv->reg_lock);
+               reg = bcmgenet_umac_readl(priv, UMAC_CMD);
+               reg &= ~((CMD_SPEED_MASK << CMD_SPEED_SHIFT) |
+                              CMD_HD_EN |
+@@ -103,6 +104,7 @@ void bcmgenet_mii_setup(struct net_device *dev)
+                       reg |= CMD_TX_EN | CMD_RX_EN;
+               }
+               bcmgenet_umac_writel(priv, reg, UMAC_CMD);
++              spin_unlock_bh(&priv->reg_lock);
+ 
+               priv->eee.eee_active = phy_init_eee(phydev, 0) >= 0;
+               bcmgenet_eee_enable_set(dev,
+@@ -264,6 +266,7 @@ int bcmgenet_mii_config(struct net_device *dev, bool init)
+        * block for the interface to work
+        */
+       if (priv->ext_phy) {
++              mutex_lock(&phydev->lock);
+               reg = bcmgenet_ext_readl(priv, EXT_RGMII_OOB_CTRL);
+               reg &= ~ID_MODE_DIS;
+               reg |= id_mode_dis;
+@@ -272,6 +275,7 @@ int bcmgenet_mii_config(struct net_device *dev, bool init)
+               else
+                       reg |= RGMII_MODE_EN;
+               bcmgenet_ext_writel(priv, reg, EXT_RGMII_OOB_CTRL);
++              mutex_unlock(&phydev->lock);
+       }
+ 
+       if (init)
+diff --git a/drivers/pinctrl/core.c b/drivers/pinctrl/core.c
+index 1c906fc68c1df..46a06067e9947 100644
+--- a/drivers/pinctrl/core.c
++++ b/drivers/pinctrl/core.c
+@@ -205,6 +205,7 @@ static int pinctrl_register_one_pin(struct pinctrl_dev *pctldev,
+                                   const struct pinctrl_pin_desc *pin)
+ {
+       struct pin_desc *pindesc;
++      int error;
+ 
+       pindesc = pin_desc_get(pctldev, pin->number);
+       if (pindesc) {
+@@ -226,18 +227,25 @@ static int pinctrl_register_one_pin(struct pinctrl_dev *pctldev,
+       } else {
+               pindesc->name = kasprintf(GFP_KERNEL, "PIN%u", pin->number);
+               if (!pindesc->name) {
+-                      kfree(pindesc);
+-                      return -ENOMEM;
++                      error = -ENOMEM;
++                      goto failed;
+               }
+               pindesc->dynamic_name = true;
+       }
+ 
+       pindesc->drv_data = pin->drv_data;
+ 
+-      radix_tree_insert(&pctldev->pin_desc_tree, pin->number, pindesc);
++      error = radix_tree_insert(&pctldev->pin_desc_tree, pin->number, pindesc);
++      if (error)
++              goto failed;
++
+       pr_debug("registered pin %d (%s) on %s\n",
+                pin->number, pindesc->name, pctldev->desc->name);
+       return 0;
++
++failed:
++      kfree(pindesc);
++      return error;
+ }
+ 
+ static int pinctrl_register_pins(struct pinctrl_dev *pctldev,
+diff --git a/drivers/remoteproc/mtk_scp.c b/drivers/remoteproc/mtk_scp.c
+index 9679cc26895e5..211c7e3b848e4 100644
+--- a/drivers/remoteproc/mtk_scp.c
++++ b/drivers/remoteproc/mtk_scp.c
+@@ -126,7 +126,7 @@ static int scp_elf_read_ipi_buf_addr(struct mtk_scp *scp,
+ static int scp_ipi_init(struct mtk_scp *scp, const struct firmware *fw)
+ {
+       int ret;
+-      size_t offset;
++      size_t buf_sz, offset;
+ 
+       /* read the ipi buf addr from FW itself first */
+       ret = scp_elf_read_ipi_buf_addr(scp, fw, &offset);
+@@ -138,6 +138,14 @@ static int scp_ipi_init(struct mtk_scp *scp, const struct firmware *fw)
+       }
+       dev_info(scp->dev, "IPI buf addr %#010zx\n", offset);
+ 
++      /* Make sure IPI buffer fits in the L2TCM range assigned to this core */
++      buf_sz = sizeof(*scp->recv_buf) + sizeof(*scp->send_buf);
++
++      if (scp->sram_size < buf_sz + offset) {
++              dev_err(scp->dev, "IPI buffer does not fit in SRAM.\n");
++              return -EOVERFLOW;
++      }
++
+       scp->recv_buf = (struct mtk_share_obj __iomem *)
+                       (scp->sram_base + offset);
+       scp->send_buf = (struct mtk_share_obj __iomem *)
+diff --git a/drivers/tty/serial/kgdboc.c b/drivers/tty/serial/kgdboc.c
+index 79b7db8580e05..d988511f8b326 100644
+--- a/drivers/tty/serial/kgdboc.c
++++ b/drivers/tty/serial/kgdboc.c
+@@ -19,6 +19,7 @@
+ #include <linux/console.h>
+ #include <linux/vt_kern.h>
+ #include <linux/input.h>
++#include <linux/irq_work.h>
+ #include <linux/module.h>
+ #include <linux/platform_device.h>
+ #include <linux/serial_core.h>
+@@ -48,6 +49,25 @@ static struct kgdb_io               kgdboc_earlycon_io_ops;
+ static int                      (*earlycon_orig_exit)(struct console *con);
+ #endif /* IS_BUILTIN(CONFIG_KGDB_SERIAL_CONSOLE) */
+ 
++/*
++ * When we leave the debug trap handler we need to reset the keyboard status
++ * (since the original keyboard state gets partially clobbered by kdb use of
++ * the keyboard).
++ *
++ * The path to deliver the reset is somewhat circuitous.
++ *
++ * To deliver the reset we register an input handler, reset the keyboard and
++ * then deregister the input handler. However, to get this done right, we do
++ * have to carefully manage the calling context because we can only register
++ * input handlers from task context.
++ *
++ * In particular we need to trigger the action from the debug trap handler with
++ * all its NMI and/or NMI-like oddities. To solve this the kgdboc trap exit code
++ * (the "post_exception" callback) uses irq_work_queue(), which is NMI-safe, to
++ * schedule a callback from a hardirq context. From there we have to defer the
++ * work again, this time using schedule_work(), to get a callback using the
++ * system workqueue, which runs in task context.
++ */
+ #ifdef CONFIG_KDB_KEYBOARD
+ static int kgdboc_reset_connect(struct input_handler *handler,
+                               struct input_dev *dev,
+@@ -99,10 +119,17 @@ static void kgdboc_restore_input_helper(struct work_struct *dummy)
+ 
+ static DECLARE_WORK(kgdboc_restore_input_work, kgdboc_restore_input_helper);
+ 
++static void kgdboc_queue_restore_input_helper(struct irq_work *unused)
++{
++      schedule_work(&kgdboc_restore_input_work);
++}
++
++static DEFINE_IRQ_WORK(kgdboc_restore_input_irq_work, kgdboc_queue_restore_input_helper);
++
+ static void kgdboc_restore_input(void)
+ {
+       if (likely(system_state == SYSTEM_RUNNING))
+-              schedule_work(&kgdboc_restore_input_work);
++              irq_work_queue(&kgdboc_restore_input_irq_work);
+ }
+ 
+ static int kgdboc_register_kbd(char **cptr)
+@@ -133,6 +160,7 @@ static void kgdboc_unregister_kbd(void)
+                       i--;
+               }
+       }
++      irq_work_sync(&kgdboc_restore_input_irq_work);
+       flush_work(&kgdboc_restore_input_work);
+ }
+ #else /* ! CONFIG_KDB_KEYBOARD */
+diff --git a/drivers/usb/typec/ucsi/displayport.c b/drivers/usb/typec/ucsi/displayport.c
+index 73cd5bf350472..2431febc46151 100644
+--- a/drivers/usb/typec/ucsi/displayport.c
++++ b/drivers/usb/typec/ucsi/displayport.c
+@@ -275,8 +275,6 @@ static void ucsi_displayport_work(struct work_struct *work)
+       struct ucsi_dp *dp = container_of(work, struct ucsi_dp, work);
+       int ret;
+ 
+-      mutex_lock(&dp->con->lock);
+-
+       ret = typec_altmode_vdm(dp->alt, dp->header,
+                               dp->vdo_data, dp->vdo_size);
+       if (ret)
+@@ -285,8 +283,6 @@ static void ucsi_displayport_work(struct work_struct *work)
+       dp->vdo_data = NULL;
+       dp->vdo_size = 0;
+       dp->header = 0;
+-
+-      mutex_unlock(&dp->con->lock);
+ }
+ 
+ void ucsi_displayport_remove_partner(struct typec_altmode *alt)
+diff --git a/fs/nfs/callback.c b/fs/nfs/callback.c
+index 456af7d230cf1..46a0a2d6962e1 100644
+--- a/fs/nfs/callback.c
++++ b/fs/nfs/callback.c
+@@ -80,9 +80,6 @@ nfs4_callback_svc(void *vrqstp)
+       set_freezable();
+ 
+       while (!kthread_freezable_should_stop(NULL)) {
+-
+-              if (signal_pending(current))
+-                      flush_signals(current);
+               /*
+                * Listen for a request on the socket
+                */
+@@ -112,11 +109,7 @@ nfs41_callback_svc(void *vrqstp)
+       set_freezable();
+ 
+       while (!kthread_freezable_should_stop(NULL)) {
+-
+-              if (signal_pending(current))
+-                      flush_signals(current);
+-
+-              prepare_to_wait(&serv->sv_cb_waitq, &wq, TASK_INTERRUPTIBLE);
++              prepare_to_wait(&serv->sv_cb_waitq, &wq, TASK_IDLE);
+               spin_lock_bh(&serv->sv_cb_lock);
+               if (!list_empty(&serv->sv_cb_list)) {
+                       req = list_first_entry(&serv->sv_cb_list,
+diff --git a/fs/nfsd/nfs4proc.c b/fs/nfsd/nfs4proc.c
+index c14f5ac1484c9..6779291efca9e 100644
+--- a/fs/nfsd/nfs4proc.c
++++ b/fs/nfsd/nfs4proc.c
+@@ -1317,12 +1317,11 @@ static __be32 nfsd4_ssc_setup_dul(struct nfsd_net *nn, char *ipaddr,
+               /* found a match */
+               if (ni->nsui_busy) {
+                       /*  wait - and try again */
+-                      prepare_to_wait(&nn->nfsd_ssc_waitq, &wait,
+-                              TASK_INTERRUPTIBLE);
++                      prepare_to_wait(&nn->nfsd_ssc_waitq, &wait, TASK_IDLE);
+                       spin_unlock(&nn->nfsd_ssc_lock);
+ 
+                       /* allow 20secs for mount/unmount for now - revisit */
+-                      if (signal_pending(current) ||
++                      if (kthread_should_stop() ||
+                                       (schedule_timeout(20*HZ) == 0)) {
+                               finish_wait(&nn->nfsd_ssc_waitq, &wait);
+                               kfree(work);
+diff --git a/fs/nfsd/nfssvc.c b/fs/nfsd/nfssvc.c
+index 4c1a0a1623e5e..3d4fd40c987bd 100644
+--- a/fs/nfsd/nfssvc.c
++++ b/fs/nfsd/nfssvc.c
+@@ -938,15 +938,6 @@ nfsd(void *vrqstp)
+ 
+       current->fs->umask = 0;
+ 
+-      /*
+-       * thread is spawned with all signals set to SIG_IGN, re-enable
+-       * the ones that will bring down the thread
+-       */
+-      allow_signal(SIGKILL);
+-      allow_signal(SIGHUP);
+-      allow_signal(SIGINT);
+-      allow_signal(SIGQUIT);
+-
+       atomic_inc(&nfsdstats.th_cnt);
+ 
+       set_freezable();
+@@ -971,9 +962,6 @@ nfsd(void *vrqstp)
+               validate_process_creds();
+       }
+ 
+-      /* Clear signals before calling svc_exit_thread() */
+-      flush_signals(current);
+-
+       atomic_dec(&nfsdstats.th_cnt);
+ 
+ out:
+diff --git a/include/net/tls.h b/include/net/tls.h
+index ea0aeae26cf76..59ff5c901ab56 100644
+--- a/include/net/tls.h
++++ b/include/net/tls.h
+@@ -128,9 +128,6 @@ struct tls_sw_context_tx {
+       struct tls_rec *open_rec;
+       struct list_head tx_list;
+       atomic_t encrypt_pending;
+-      /* protect crypto_wait with encrypt_pending */
+-      spinlock_t encrypt_compl_lock;
+-      int async_notify;
+       u8 async_capable:1;
+ 
+ #define BIT_TX_SCHEDULED      0
+@@ -148,9 +145,6 @@ struct tls_sw_context_rx {
+       struct sk_buff *recv_pkt;
+       u8 async_capable:1;
+       atomic_t decrypt_pending;
+-      /* protect crypto_wait with decrypt_pending*/
+-      spinlock_t decrypt_compl_lock;
+-      bool async_notify;
+ };
+ 
+ struct tls_record_info {
+diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
+index 82df02695bbdd..216445dd44db9 100644
+--- a/net/netlink/af_netlink.c
++++ b/net/netlink/af_netlink.c
+@@ -352,7 +352,7 @@ static void netlink_overrun(struct sock *sk)
+       if (!(nlk->flags & NETLINK_F_RECV_NO_ENOBUFS)) {
+               if (!test_and_set_bit(NETLINK_S_CONGESTED,
+                                     &nlk_sk(sk)->state)) {
+-                      sk->sk_err = ENOBUFS;
++                      WRITE_ONCE(sk->sk_err, ENOBUFS);
+                       sk_error_report(sk);
+               }
+       }
+@@ -1591,7 +1591,7 @@ static int do_one_set_err(struct sock *sk, struct netlink_set_err_data *p)
+               goto out;
+       }
+ 
+-      sk->sk_err = p->code;
++      WRITE_ONCE(sk->sk_err, p->code);
+       sk_error_report(sk);
+ out:
+       return ret;
+@@ -1935,7 +1935,7 @@ static int netlink_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
+       struct sock *sk = sock->sk;
+       struct netlink_sock *nlk = nlk_sk(sk);
+       int noblock = flags & MSG_DONTWAIT;
+-      size_t copied;
++      size_t copied, max_recvmsg_len;
+       struct sk_buff *skb, *data_skb;
+       int err, ret;
+ 
+@@ -1968,9 +1968,10 @@ static int netlink_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
+ #endif
+ 
+       /* Record the max length of recvmsg() calls for future allocations */
+-      nlk->max_recvmsg_len = max(nlk->max_recvmsg_len, len);
+-      nlk->max_recvmsg_len = min_t(size_t, nlk->max_recvmsg_len,
+-                                   SKB_WITH_OVERHEAD(32768));
++      max_recvmsg_len = max(READ_ONCE(nlk->max_recvmsg_len), len);
++      max_recvmsg_len = min_t(size_t, max_recvmsg_len,
++                              SKB_WITH_OVERHEAD(32768));
++      WRITE_ONCE(nlk->max_recvmsg_len, max_recvmsg_len);
+ 
+       copied = data_skb->len;
+       if (len < copied) {
+@@ -2005,7 +2006,7 @@ static int netlink_recvmsg(struct socket *sock, struct 
msghdr *msg, size_t len,
+           atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf / 2) {
+               ret = netlink_dump(sk);
+               if (ret) {
+-                      sk->sk_err = -ret;
++                      WRITE_ONCE(sk->sk_err, -ret);
+                       sk_error_report(sk);
+               }
+       }
+@@ -2219,6 +2220,7 @@ static int netlink_dump(struct sock *sk)
+       struct netlink_ext_ack extack = {};
+       struct netlink_callback *cb;
+       struct sk_buff *skb = NULL;
++      size_t max_recvmsg_len;
+       struct module *module;
+       int err = -ENOBUFS;
+       int alloc_min_size;
+@@ -2241,8 +2243,9 @@ static int netlink_dump(struct sock *sk)
+       cb = &nlk->cb;
+       alloc_min_size = max_t(int, cb->min_dump_alloc, NLMSG_GOODSIZE);
+ 
+-      if (alloc_min_size < nlk->max_recvmsg_len) {
+-              alloc_size = nlk->max_recvmsg_len;
++      max_recvmsg_len = READ_ONCE(nlk->max_recvmsg_len);
++      if (alloc_min_size < max_recvmsg_len) {
++              alloc_size = max_recvmsg_len;
+               skb = alloc_skb(alloc_size,
+                               (GFP_KERNEL & ~__GFP_DIRECT_RECLAIM) |
+                               __GFP_NOWARN | __GFP_NORETRY);
+@@ -2439,7 +2442,7 @@ void netlink_ack(struct sk_buff *in_skb, struct nlmsghdr *nlh, int err,
+ 
+       skb = nlmsg_new(payload + tlvlen, GFP_KERNEL);
+       if (!skb) {
+-              NETLINK_CB(in_skb).sk->sk_err = ENOBUFS;
++              WRITE_ONCE(NETLINK_CB(in_skb).sk->sk_err, ENOBUFS);
+               sk_error_report(NETLINK_CB(in_skb).sk);
+               return;
+       }
+diff --git a/net/sunrpc/svc_xprt.c b/net/sunrpc/svc_xprt.c
+index 67ccf1a6459ae..b19592673eef2 100644
+--- a/net/sunrpc/svc_xprt.c
++++ b/net/sunrpc/svc_xprt.c
+@@ -700,8 +700,8 @@ static int svc_alloc_arg(struct svc_rqst *rqstp)
+                       /* Made progress, don't sleep yet */
+                       continue;
+ 
+-              set_current_state(TASK_INTERRUPTIBLE);
+-              if (signalled() || kthread_should_stop()) {
++              set_current_state(TASK_IDLE);
++              if (kthread_should_stop()) {
+                       set_current_state(TASK_RUNNING);
+                       return -EINTR;
+               }
+@@ -736,7 +736,7 @@ rqst_should_sleep(struct svc_rqst *rqstp)
+               return false;
+ 
+       /* are we shutting down? */
+-      if (signalled() || kthread_should_stop())
++      if (kthread_should_stop())
+               return false;
+ 
+       /* are we freezing? */
+@@ -758,11 +758,7 @@ static struct svc_xprt *svc_get_next_xprt(struct svc_rqst *rqstp, long timeout)
+       if (rqstp->rq_xprt)
+               goto out_found;
+ 
+-      /*
+-       * We have to be able to interrupt this wait
+-       * to bring down the daemons ...
+-       */
+-      set_current_state(TASK_INTERRUPTIBLE);
++      set_current_state(TASK_IDLE);
+       smp_mb__before_atomic();
+       clear_bit(SP_CONGESTED, &pool->sp_flags);
+       clear_bit(RQ_BUSY, &rqstp->rq_flags);
+@@ -784,7 +780,7 @@ static struct svc_xprt *svc_get_next_xprt(struct svc_rqst *rqstp, long timeout)
+       if (!time_left)
+               atomic_long_inc(&pool->sp_stats.threads_timedout);
+ 
+-      if (signalled() || kthread_should_stop())
++      if (kthread_should_stop())
+               return ERR_PTR(-EINTR);
+       return ERR_PTR(-EAGAIN);
+ out_found:
+@@ -882,7 +878,7 @@ int svc_recv(struct svc_rqst *rqstp, long timeout)
+       try_to_freeze();
+       cond_resched();
+       err = -EINTR;
+-      if (signalled() || kthread_should_stop())
++      if (kthread_should_stop())
+               goto out;
+ 
+       xprt = svc_get_next_xprt(rqstp, timeout);
+diff --git a/net/tls/tls_sw.c b/net/tls/tls_sw.c
+index fc55b65695e5c..90f6cbe5cd5d2 100644
+--- a/net/tls/tls_sw.c
++++ b/net/tls/tls_sw.c
+@@ -174,7 +174,17 @@ static void tls_decrypt_done(struct crypto_async_request *req, int err)
+       struct scatterlist *sg;
+       struct sk_buff *skb;
+       unsigned int pages;
+-      int pending;
++
++      /* If requests get too backlogged crypto API returns -EBUSY and calls
++       * ->complete(-EINPROGRESS) immediately followed by ->complete(0)
++       * to make waiting for backlog to flush with crypto_wait_req() easier.
++       * First wait converts -EBUSY -> -EINPROGRESS, and the second one
++       * -EINPROGRESS -> 0.
++       * We have a single struct crypto_async_request per direction, this
++       * scheme doesn't help us, so just ignore the first ->complete().
++       */
++      if (err == -EINPROGRESS)
++              return;
+ 
+       skb = (struct sk_buff *)req->data;
+       tls_ctx = tls_get_ctx(skb->sk);
+@@ -221,12 +231,17 @@ static void tls_decrypt_done(struct crypto_async_request *req, int err)
+ 
+       kfree(aead_req);
+ 
+-      spin_lock_bh(&ctx->decrypt_compl_lock);
+-      pending = atomic_dec_return(&ctx->decrypt_pending);
+-
+-      if (!pending && ctx->async_notify)
++      if (atomic_dec_and_test(&ctx->decrypt_pending))
+               complete(&ctx->async_wait.completion);
+-      spin_unlock_bh(&ctx->decrypt_compl_lock);
++}
++
++static int tls_decrypt_async_wait(struct tls_sw_context_rx *ctx)
++{
++      if (!atomic_dec_and_test(&ctx->decrypt_pending))
++              crypto_wait_req(-EINPROGRESS, &ctx->async_wait);
++      atomic_inc(&ctx->decrypt_pending);
++
++      return ctx->async_wait.err;
+ }
+ 
+ static int tls_do_decryption(struct sock *sk,
+@@ -260,6 +275,7 @@ static int tls_do_decryption(struct sock *sk,
+               aead_request_set_callback(aead_req,
+                                         CRYPTO_TFM_REQ_MAY_BACKLOG,
+                                         tls_decrypt_done, skb);
++              BUILD_BUG_ON_INVALID(atomic_read(&ctx->decrypt_pending) < 1);
+               atomic_inc(&ctx->decrypt_pending);
+       } else {
+               aead_request_set_callback(aead_req,
+@@ -268,6 +284,10 @@ static int tls_do_decryption(struct sock *sk,
+       }
+ 
+       ret = crypto_aead_decrypt(aead_req);
++      if (ret == -EBUSY) {
++              ret = tls_decrypt_async_wait(ctx);
++              ret = ret ?: -EINPROGRESS;
++      }
+       if (ret == -EINPROGRESS) {
+               if (darg->async)
+                       return 0;
+@@ -449,7 +469,9 @@ static void tls_encrypt_done(struct crypto_async_request *req, int err)
+       struct sk_msg *msg_en;
+       struct tls_rec *rec;
+       bool ready = false;
+-      int pending;
++
++      if (err == -EINPROGRESS) /* see the comment in tls_decrypt_done() */
++              return;
+ 
+       rec = container_of(aead_req, struct tls_rec, aead_req);
+       msg_en = &rec->msg_encrypted;
+@@ -484,12 +506,8 @@ static void tls_encrypt_done(struct crypto_async_request *req, int err)
+                       ready = true;
+       }
+ 
+-      spin_lock_bh(&ctx->encrypt_compl_lock);
+-      pending = atomic_dec_return(&ctx->encrypt_pending);
+-
+-      if (!pending && ctx->async_notify)
++      if (atomic_dec_and_test(&ctx->encrypt_pending))
+               complete(&ctx->async_wait.completion);
+-      spin_unlock_bh(&ctx->encrypt_compl_lock);
+ 
+       if (!ready)
+               return;
+@@ -499,6 +517,15 @@ static void tls_encrypt_done(struct crypto_async_request *req, int err)
+               schedule_delayed_work(&ctx->tx_work.work, 1);
+ }
+ 
++static int tls_encrypt_async_wait(struct tls_sw_context_tx *ctx)
++{
++      if (!atomic_dec_and_test(&ctx->encrypt_pending))
++              crypto_wait_req(-EINPROGRESS, &ctx->async_wait);
++      atomic_inc(&ctx->encrypt_pending);
++
++      return ctx->async_wait.err;
++}
++
+ static int tls_do_encryption(struct sock *sk,
+                            struct tls_context *tls_ctx,
+                            struct tls_sw_context_tx *ctx,
+@@ -538,9 +565,14 @@ static int tls_do_encryption(struct sock *sk,
+ 
+       /* Add the record in tx_list */
+       list_add_tail((struct list_head *)&rec->list, &ctx->tx_list);
++      BUILD_BUG_ON_INVALID(atomic_read(&ctx->encrypt_pending) < 1);
+       atomic_inc(&ctx->encrypt_pending);
+ 
+       rc = crypto_aead_encrypt(aead_req);
++      if (rc == -EBUSY) {
++              rc = tls_encrypt_async_wait(ctx);
++              rc = rc ?: -EINPROGRESS;
++      }
+       if (!rc || rc != -EINPROGRESS) {
+               atomic_dec(&ctx->encrypt_pending);
+               sge->offset -= prot->prepend_size;
+@@ -949,7 +981,6 @@ int tls_sw_sendmsg(struct sock *sk, struct msghdr *msg, size_t size)
+       int num_zc = 0;
+       int orig_size;
+       int ret = 0;
+-      int pending;
+ 
+       if (msg->msg_flags & ~(MSG_MORE | MSG_DONTWAIT | MSG_NOSIGNAL |
+                              MSG_CMSG_COMPAT))
+@@ -1118,24 +1149,12 @@ int tls_sw_sendmsg(struct sock *sk, struct msghdr *msg, size_t size)
+       if (!num_async) {
+               goto send_end;
+       } else if (num_zc) {
+-              /* Wait for pending encryptions to get completed */
+-              spin_lock_bh(&ctx->encrypt_compl_lock);
+-              ctx->async_notify = true;
+-
+-              pending = atomic_read(&ctx->encrypt_pending);
+-              spin_unlock_bh(&ctx->encrypt_compl_lock);
+-              if (pending)
+-                      crypto_wait_req(-EINPROGRESS, &ctx->async_wait);
+-              else
+-                      reinit_completion(&ctx->async_wait.completion);
+-
+-              /* There can be no concurrent accesses, since we have no
+-               * pending encrypt operations
+-               */
+-              WRITE_ONCE(ctx->async_notify, false);
++              int err;
+ 
+-              if (ctx->async_wait.err) {
+-                      ret = ctx->async_wait.err;
++              /* Wait for pending encryptions to get completed */
++              err = tls_encrypt_async_wait(ctx);
++              if (err) {
++                      ret = err;
+                       copied = 0;
+               }
+       }
+@@ -1913,31 +1932,16 @@ int tls_sw_recvmsg(struct sock *sk,
+ 
+ recv_end:
+       if (async) {
+-              int pending;
+-
+               /* Wait for all previously submitted records to be decrypted */
+-              spin_lock_bh(&ctx->decrypt_compl_lock);
+-              ctx->async_notify = true;
+-              pending = atomic_read(&ctx->decrypt_pending);
+-              spin_unlock_bh(&ctx->decrypt_compl_lock);
+-              if (pending) {
+-                      err = crypto_wait_req(-EINPROGRESS, &ctx->async_wait);
+-                      if (err) {
+-                              /* one of async decrypt failed */
+-                              tls_err_abort(sk, err);
+-                              copied = 0;
+-                              decrypted = 0;
+-                              goto end;
+-                      }
+-              } else {
+-                      reinit_completion(&ctx->async_wait.completion);
++              err = tls_decrypt_async_wait(ctx);
++              if (err) {
++                      /* one of async decrypt failed */
++                      tls_err_abort(sk, err);
++                      copied = 0;
++                      decrypted = 0;
++                      goto end;
+               }
+ 
+-              /* There can be no concurrent accesses, since we have no
+-               * pending decrypt operations
+-               */
+-              WRITE_ONCE(ctx->async_notify, false);
+-
+               /* Drain records from the rx_list & copy if required */
+               if (is_peek || is_kvec)
+                       err = process_rx_list(ctx, msg, &control, copied,
+@@ -2154,16 +2158,9 @@ void tls_sw_release_resources_tx(struct sock *sk)
+       struct tls_context *tls_ctx = tls_get_ctx(sk);
+       struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
+       struct tls_rec *rec, *tmp;
+-      int pending;
+ 
+       /* Wait for any pending async encryptions to complete */
+-      spin_lock_bh(&ctx->encrypt_compl_lock);
+-      ctx->async_notify = true;
+-      pending = atomic_read(&ctx->encrypt_pending);
+-      spin_unlock_bh(&ctx->encrypt_compl_lock);
+-
+-      if (pending)
+-              crypto_wait_req(-EINPROGRESS, &ctx->async_wait);
++      tls_encrypt_async_wait(ctx);
+ 
+       tls_tx_records(sk, -1);
+ 
+@@ -2301,6 +2298,46 @@ void tls_sw_strparser_arm(struct sock *sk, struct tls_context *tls_ctx)
+       strp_check_rcv(&rx_ctx->strp);
+ }
+ 
++static struct tls_sw_context_tx *init_ctx_tx(struct tls_context *ctx, struct sock *sk)
++{
++      struct tls_sw_context_tx *sw_ctx_tx;
++
++      if (!ctx->priv_ctx_tx) {
++              sw_ctx_tx = kzalloc(sizeof(*sw_ctx_tx), GFP_KERNEL);
++              if (!sw_ctx_tx)
++                      return NULL;
++      } else {
++              sw_ctx_tx = ctx->priv_ctx_tx;
++      }
++
++      crypto_init_wait(&sw_ctx_tx->async_wait);
++      atomic_set(&sw_ctx_tx->encrypt_pending, 1);
++      INIT_LIST_HEAD(&sw_ctx_tx->tx_list);
++      INIT_DELAYED_WORK(&sw_ctx_tx->tx_work.work, tx_work_handler);
++      sw_ctx_tx->tx_work.sk = sk;
++
++      return sw_ctx_tx;
++}
++
++static struct tls_sw_context_rx *init_ctx_rx(struct tls_context *ctx)
++{
++      struct tls_sw_context_rx *sw_ctx_rx;
++
++      if (!ctx->priv_ctx_rx) {
++              sw_ctx_rx = kzalloc(sizeof(*sw_ctx_rx), GFP_KERNEL);
++              if (!sw_ctx_rx)
++                      return NULL;
++      } else {
++              sw_ctx_rx = ctx->priv_ctx_rx;
++      }
++
++      crypto_init_wait(&sw_ctx_rx->async_wait);
++      atomic_set(&sw_ctx_rx->decrypt_pending, 1);
++      skb_queue_head_init(&sw_ctx_rx->rx_list);
++
++      return sw_ctx_rx;
++}
++
+ int tls_set_sw_offload(struct sock *sk, struct tls_context *ctx, int tx)
+ {
+       struct tls_context *tls_ctx = tls_get_ctx(sk);
+@@ -2327,46 +2364,22 @@ int tls_set_sw_offload(struct sock *sk, struct tls_context *ctx, int tx)
+       }
+ 
+       if (tx) {
+-              if (!ctx->priv_ctx_tx) {
+-                      sw_ctx_tx = kzalloc(sizeof(*sw_ctx_tx), GFP_KERNEL);
+-                      if (!sw_ctx_tx) {
+-                              rc = -ENOMEM;
+-                              goto out;
+-                      }
+-                      ctx->priv_ctx_tx = sw_ctx_tx;
+-              } else {
+-                      sw_ctx_tx =
+-                              (struct tls_sw_context_tx *)ctx->priv_ctx_tx;
+-              }
+-      } else {
+-              if (!ctx->priv_ctx_rx) {
+-                      sw_ctx_rx = kzalloc(sizeof(*sw_ctx_rx), GFP_KERNEL);
+-                      if (!sw_ctx_rx) {
+-                              rc = -ENOMEM;
+-                              goto out;
+-                      }
+-                      ctx->priv_ctx_rx = sw_ctx_rx;
+-              } else {
+-                      sw_ctx_rx =
+-                              (struct tls_sw_context_rx *)ctx->priv_ctx_rx;
+-              }
+-      }
++              ctx->priv_ctx_tx = init_ctx_tx(ctx, sk);
++              if (!ctx->priv_ctx_tx)
++                      return -ENOMEM;
+ 
+-      if (tx) {
+-              crypto_init_wait(&sw_ctx_tx->async_wait);
+-              spin_lock_init(&sw_ctx_tx->encrypt_compl_lock);
++              sw_ctx_tx = ctx->priv_ctx_tx;
+               crypto_info = &ctx->crypto_send.info;
+               cctx = &ctx->tx;
+               aead = &sw_ctx_tx->aead_send;
+-              INIT_LIST_HEAD(&sw_ctx_tx->tx_list);
+-              INIT_DELAYED_WORK(&sw_ctx_tx->tx_work.work, tx_work_handler);
+-              sw_ctx_tx->tx_work.sk = sk;
+       } else {
+-              crypto_init_wait(&sw_ctx_rx->async_wait);
+-              spin_lock_init(&sw_ctx_rx->decrypt_compl_lock);
++              ctx->priv_ctx_rx = init_ctx_rx(ctx);
++              if (!ctx->priv_ctx_rx)
++                      return -ENOMEM;
++
++              sw_ctx_rx = ctx->priv_ctx_rx;
+               crypto_info = &ctx->crypto_recv.info;
+               cctx = &ctx->rx;
+-              skb_queue_head_init(&sw_ctx_rx->rx_list);
+               aead = &sw_ctx_rx->aead_recv;
+       }
+ 
+diff --git a/security/keys/trusted-keys/trusted_tpm2.c b/security/keys/trusted-keys/trusted_tpm2.c
+index bc700f85f80be..ea277c55a38db 100644
+--- a/security/keys/trusted-keys/trusted_tpm2.c
++++ b/security/keys/trusted-keys/trusted_tpm2.c
+@@ -38,6 +38,7 @@ static int tpm2_key_encode(struct trusted_key_payload *payload,
+       u8 *end_work = scratch + SCRATCH_SIZE;
+       u8 *priv, *pub;
+       u16 priv_len, pub_len;
++      int ret;
+ 
+       priv_len = get_unaligned_be16(src) + 2;
+       priv = src;
+@@ -57,8 +58,10 @@ static int tpm2_key_encode(struct trusted_key_payload *payload,
+               unsigned char bool[3], *w = bool;
+               /* tag 0 is emptyAuth */
+               w = asn1_encode_boolean(w, w + sizeof(bool), true);
+-              if (WARN(IS_ERR(w), "BUG: Boolean failed to encode"))
+-                      return PTR_ERR(w);
++              if (WARN(IS_ERR(w), "BUG: Boolean failed to encode")) {
++                      ret = PTR_ERR(w);
++                      goto err;
++              }
+               work = asn1_encode_tag(work, end_work, 0, bool, w - bool);
+       }
+ 
+@@ -69,8 +72,10 @@ static int tpm2_key_encode(struct trusted_key_payload *payload,
+        * trigger, so if it does there's something nefarious going on
+        */
+       if (WARN(work - scratch + pub_len + priv_len + 14 > SCRATCH_SIZE,
+-               "BUG: scratch buffer is too small"))
+-              return -EINVAL;
++               "BUG: scratch buffer is too small")) {
++              ret = -EINVAL;
++              goto err;
++      }
+ 
+       work = asn1_encode_integer(work, end_work, options->keyhandle);
+       work = asn1_encode_octet_string(work, end_work, pub, pub_len);
+@@ -79,10 +84,18 @@ static int tpm2_key_encode(struct trusted_key_payload *payload,
+       work1 = payload->blob;
+       work1 = asn1_encode_sequence(work1, work1 + sizeof(payload->blob),
+                                    scratch, work - scratch);
+-      if (WARN(IS_ERR(work1), "BUG: ASN.1 encoder failed"))
+-              return PTR_ERR(work1);
++      if (IS_ERR(work1)) {
++              ret = PTR_ERR(work1);
++              pr_err("BUG: ASN.1 encoder failed with %d\n", ret);
++              goto err;
++      }
+ 
++      kfree(scratch);
+       return work1 - payload->blob;
++
++err:
++      kfree(scratch);
++      return ret;
+ }
+ 
+ struct tpm2_key_context {
+diff --git a/tools/testing/selftests/vm/map_hugetlb.c b/tools/testing/selftests/vm/map_hugetlb.c
+index c65c55b7a789f..312889edb84ab 100644
+--- a/tools/testing/selftests/vm/map_hugetlb.c
++++ b/tools/testing/selftests/vm/map_hugetlb.c
+@@ -15,7 +15,6 @@
+ #include <unistd.h>
+ #include <sys/mman.h>
+ #include <fcntl.h>
+-#include "vm_util.h"
+ 
+ #define LENGTH (256UL*1024*1024)
+ #define PROTECTION (PROT_READ | PROT_WRITE)
+@@ -71,16 +70,10 @@ int main(int argc, char **argv)
+ {
+       void *addr;
+       int ret;
+-      size_t hugepage_size;
+       size_t length = LENGTH;
+       int flags = FLAGS;
+       int shift = 0;
+ 
+-      hugepage_size = default_huge_page_size();
+-      /* munmap with fail if the length is not page aligned */
+-      if (hugepage_size > length)
+-              length = hugepage_size;
+-
+       if (argc > 1)
+               length = atol(argv[1]) << 20;
+       if (argc > 2) {

