commit:     daa429d9e46fa8d87a88d9388e912c537dbb4161
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Wed Mar  6 18:09:12 2024 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Wed Mar  6 18:09:12 2024 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=daa429d9

Linux patch 5.10.212

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

 0000_README               |    4 +
 1211_linux-5.10.212.patch | 1357 +++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 1361 insertions(+)

diff --git a/0000_README b/0000_README
index bcc82e6d..ff3682f8 100644
--- a/0000_README
+++ b/0000_README
@@ -887,6 +887,10 @@ Patch:  1210_linux-5.10.211.patch
 From:   https://www.kernel.org
 Desc:   Linux 5.10.211
 
+Patch:  1211_linux-5.10.212.patch
+From:   https://www.kernel.org
+Desc:   Linux 5.10.212
+
 Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.

diff --git a/1211_linux-5.10.212.patch b/1211_linux-5.10.212.patch
new file mode 100644
index 00000000..89fddcdc
--- /dev/null
+++ b/1211_linux-5.10.212.patch
@@ -0,0 +1,1357 @@
+diff --git a/Makefile b/Makefile
+index dc55a86e0f7df..d7ec0be4cd791 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 5
+ PATCHLEVEL = 10
+-SUBLEVEL = 211
++SUBLEVEL = 212
+ EXTRAVERSION =
+ NAME = Dare mighty things
+ 
+diff --git a/arch/riscv/include/asm/pgtable.h b/arch/riscv/include/asm/pgtable.h
+index b16304fdf4489..5ab13570daa53 100644
+--- a/arch/riscv/include/asm/pgtable.h
++++ b/arch/riscv/include/asm/pgtable.h
+@@ -44,7 +44,7 @@
+  * Define vmemmap for pfn_to_page & page_to_pfn calls. Needed if kernel
+  * is configured with CONFIG_SPARSEMEM_VMEMMAP enabled.
+  */
+-#define vmemmap               ((struct page *)VMEMMAP_START)
+-#define vmemmap               ((struct page *)VMEMMAP_START - (phys_ram_base >> PAGE_SHIFT))
+ 
+ #define PCI_IO_SIZE      SZ_16M
+ #define PCI_IO_END       VMEMMAP_START
+diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c
+index c6ad53e38f653..a7a8c7731c1a4 100644
+--- a/arch/x86/kernel/cpu/intel.c
++++ b/arch/x86/kernel/cpu/intel.c
+@@ -178,6 +178,90 @@ static bool bad_spectre_microcode(struct cpuinfo_x86 *c)
+       return false;
+ }
+ 
++#define MSR_IA32_TME_ACTIVATE         0x982
++
++/* Helpers to access TME_ACTIVATE MSR */
++#define TME_ACTIVATE_LOCKED(x)                (x & 0x1)
++#define TME_ACTIVATE_ENABLED(x)               (x & 0x2)
++
++#define TME_ACTIVATE_POLICY(x)                ((x >> 4) & 0xf)        /* Bits 7:4 */
++#define TME_ACTIVATE_POLICY_AES_XTS_128       0
++
++#define TME_ACTIVATE_KEYID_BITS(x)    ((x >> 32) & 0xf)       /* Bits 35:32 */
++
++#define TME_ACTIVATE_CRYPTO_ALGS(x)   ((x >> 48) & 0xffff)    /* Bits 63:48 */
++#define TME_ACTIVATE_CRYPTO_AES_XTS_128       1
++
++/* Values for mktme_status (SW only construct) */
++#define MKTME_ENABLED                 0
++#define MKTME_DISABLED                        1
++#define MKTME_UNINITIALIZED           2
++static int mktme_status = MKTME_UNINITIALIZED;
++
++static void detect_tme_early(struct cpuinfo_x86 *c)
++{
++      u64 tme_activate, tme_policy, tme_crypto_algs;
++      int keyid_bits = 0, nr_keyids = 0;
++      static u64 tme_activate_cpu0 = 0;
++
++      rdmsrl(MSR_IA32_TME_ACTIVATE, tme_activate);
++
++      if (mktme_status != MKTME_UNINITIALIZED) {
++              if (tme_activate != tme_activate_cpu0) {
++                      /* Broken BIOS? */
++                      pr_err_once("x86/tme: configuration is inconsistent between CPUs\n");
++                      pr_err_once("x86/tme: MKTME is not usable\n");
++                      mktme_status = MKTME_DISABLED;
++
++                      /* Proceed. We may need to exclude bits from x86_phys_bits. */
++              }
++      } else {
++              tme_activate_cpu0 = tme_activate;
++      }
++
++      if (!TME_ACTIVATE_LOCKED(tme_activate) || !TME_ACTIVATE_ENABLED(tme_activate)) {
++              pr_info_once("x86/tme: not enabled by BIOS\n");
++              mktme_status = MKTME_DISABLED;
++              return;
++      }
++
++      if (mktme_status != MKTME_UNINITIALIZED)
++              goto detect_keyid_bits;
++
++      pr_info("x86/tme: enabled by BIOS\n");
++
++      tme_policy = TME_ACTIVATE_POLICY(tme_activate);
++      if (tme_policy != TME_ACTIVATE_POLICY_AES_XTS_128)
++              pr_warn("x86/tme: Unknown policy is active: %#llx\n", tme_policy);
++
++      tme_crypto_algs = TME_ACTIVATE_CRYPTO_ALGS(tme_activate);
++      if (!(tme_crypto_algs & TME_ACTIVATE_CRYPTO_AES_XTS_128)) {
++              pr_err("x86/mktme: No known encryption algorithm is supported: %#llx\n",
++                              tme_crypto_algs);
++              mktme_status = MKTME_DISABLED;
++      }
++detect_keyid_bits:
++      keyid_bits = TME_ACTIVATE_KEYID_BITS(tme_activate);
++      nr_keyids = (1UL << keyid_bits) - 1;
++      if (nr_keyids) {
++              pr_info_once("x86/mktme: enabled by BIOS\n");
++              pr_info_once("x86/mktme: %d KeyIDs available\n", nr_keyids);
++      } else {
++              pr_info_once("x86/mktme: disabled by BIOS\n");
++      }
++
++      if (mktme_status == MKTME_UNINITIALIZED) {
++              /* MKTME is usable */
++              mktme_status = MKTME_ENABLED;
++      }
++
++      /*
++       * KeyID bits effectively lower the number of physical address
++       * bits.  Update cpuinfo_x86::x86_phys_bits accordingly.
++       */
++      c->x86_phys_bits -= keyid_bits;
++}
++
+ static void early_init_intel(struct cpuinfo_x86 *c)
+ {
+       u64 misc_enable;
+@@ -329,6 +413,13 @@ static void early_init_intel(struct cpuinfo_x86 *c)
+        */
+       if (detect_extended_topology_early(c) < 0)
+               detect_ht_early(c);
++
++      /*
++       * Adjust the number of physical bits early because it affects the
++       * valid bits of the MTRR mask registers.
++       */
++      if (cpu_has(c, X86_FEATURE_TME))
++              detect_tme_early(c);
+ }
+ 
+ static void bsp_init_intel(struct cpuinfo_x86 *c)
+@@ -489,90 +580,6 @@ static void srat_detect_node(struct cpuinfo_x86 *c)
+ #endif
+ }
+ 
+-#define MSR_IA32_TME_ACTIVATE         0x982
+-
+-/* Helpers to access TME_ACTIVATE MSR */
+-#define TME_ACTIVATE_LOCKED(x)                (x & 0x1)
+-#define TME_ACTIVATE_ENABLED(x)               (x & 0x2)
+-
+-#define TME_ACTIVATE_POLICY(x)                ((x >> 4) & 0xf)        /* Bits 7:4 */
+-#define TME_ACTIVATE_POLICY_AES_XTS_128       0
+-
+-#define TME_ACTIVATE_KEYID_BITS(x)    ((x >> 32) & 0xf)       /* Bits 35:32 */
+-
+-#define TME_ACTIVATE_CRYPTO_ALGS(x)   ((x >> 48) & 0xffff)    /* Bits 63:48 */
+-#define TME_ACTIVATE_CRYPTO_AES_XTS_128       1
+-
+-/* Values for mktme_status (SW only construct) */
+-#define MKTME_ENABLED                 0
+-#define MKTME_DISABLED                        1
+-#define MKTME_UNINITIALIZED           2
+-static int mktme_status = MKTME_UNINITIALIZED;
+-
+-static void detect_tme(struct cpuinfo_x86 *c)
+-{
+-      u64 tme_activate, tme_policy, tme_crypto_algs;
+-      int keyid_bits = 0, nr_keyids = 0;
+-      static u64 tme_activate_cpu0 = 0;
+-
+-      rdmsrl(MSR_IA32_TME_ACTIVATE, tme_activate);
+-
+-      if (mktme_status != MKTME_UNINITIALIZED) {
+-              if (tme_activate != tme_activate_cpu0) {
+-                      /* Broken BIOS? */
+-                      pr_err_once("x86/tme: configuration is inconsistent between CPUs\n");
+-                      pr_err_once("x86/tme: MKTME is not usable\n");
+-                      mktme_status = MKTME_DISABLED;
+-
+-                      /* Proceed. We may need to exclude bits from x86_phys_bits. */
+-              }
+-      } else {
+-              tme_activate_cpu0 = tme_activate;
+-      }
+-
+-      if (!TME_ACTIVATE_LOCKED(tme_activate) || !TME_ACTIVATE_ENABLED(tme_activate)) {
+-              pr_info_once("x86/tme: not enabled by BIOS\n");
+-              mktme_status = MKTME_DISABLED;
+-              return;
+-      }
+-
+-      if (mktme_status != MKTME_UNINITIALIZED)
+-              goto detect_keyid_bits;
+-
+-      pr_info("x86/tme: enabled by BIOS\n");
+-
+-      tme_policy = TME_ACTIVATE_POLICY(tme_activate);
+-      if (tme_policy != TME_ACTIVATE_POLICY_AES_XTS_128)
+-              pr_warn("x86/tme: Unknown policy is active: %#llx\n", tme_policy);
+-
+-      tme_crypto_algs = TME_ACTIVATE_CRYPTO_ALGS(tme_activate);
+-      if (!(tme_crypto_algs & TME_ACTIVATE_CRYPTO_AES_XTS_128)) {
+-              pr_err("x86/mktme: No known encryption algorithm is supported: %#llx\n",
+-                              tme_crypto_algs);
+-              mktme_status = MKTME_DISABLED;
+-      }
+-detect_keyid_bits:
+-      keyid_bits = TME_ACTIVATE_KEYID_BITS(tme_activate);
+-      nr_keyids = (1UL << keyid_bits) - 1;
+-      if (nr_keyids) {
+-              pr_info_once("x86/mktme: enabled by BIOS\n");
+-              pr_info_once("x86/mktme: %d KeyIDs available\n", nr_keyids);
+-      } else {
+-              pr_info_once("x86/mktme: disabled by BIOS\n");
+-      }
+-
+-      if (mktme_status == MKTME_UNINITIALIZED) {
+-              /* MKTME is usable */
+-              mktme_status = MKTME_ENABLED;
+-      }
+-
+-      /*
+-       * KeyID bits effectively lower the number of physical address
+-       * bits.  Update cpuinfo_x86::x86_phys_bits accordingly.
+-       */
+-      c->x86_phys_bits -= keyid_bits;
+-}
+-
+ static void init_cpuid_fault(struct cpuinfo_x86 *c)
+ {
+       u64 msr;
+@@ -708,9 +715,6 @@ static void init_intel(struct cpuinfo_x86 *c)
+ 
+       init_ia32_feat_ctl(c);
+ 
+-      if (cpu_has(c, X86_FEATURE_TME))
+-              detect_tme(c);
+-
+       init_intel_misc_features(c);
+ 
+       if (tsx_ctrl_state == TSX_CTRL_ENABLE)
+diff --git a/drivers/crypto/virtio/virtio_crypto_akcipher_algs.c b/drivers/crypto/virtio/virtio_crypto_akcipher_algs.c
+index 2cfc36d141c07..c58fac5748359 100644
+--- a/drivers/crypto/virtio/virtio_crypto_akcipher_algs.c
++++ b/drivers/crypto/virtio/virtio_crypto_akcipher_algs.c
+@@ -101,7 +101,8 @@ static void virtio_crypto_dataq_akcipher_callback(struct virtio_crypto_request *
+ }
+ 
+static int virtio_crypto_alg_akcipher_init_session(struct virtio_crypto_akcipher_ctx *ctx,
+-              struct virtio_crypto_ctrl_header *header, void *para,
++              struct virtio_crypto_ctrl_header *header,
++              struct virtio_crypto_akcipher_session_para *para,
+               const uint8_t *key, unsigned int keylen)
+ {
+       struct scatterlist outhdr_sg, key_sg, inhdr_sg, *sgs[3];
+@@ -125,7 +126,7 @@ static int virtio_crypto_alg_akcipher_init_session(struct virtio_crypto_akcipher
+ 
+       ctrl = &vc_ctrl_req->ctrl;
+       memcpy(&ctrl->header, header, sizeof(ctrl->header));
+-      memcpy(&ctrl->u, para, sizeof(ctrl->u));
++      memcpy(&ctrl->u.akcipher_create_session.para, para, sizeof(*para));
+       input = &vc_ctrl_req->input;
+       input->status = cpu_to_le32(VIRTIO_CRYPTO_ERR);
+ 
+diff --git a/drivers/dma/fsl-qdma.c b/drivers/dma/fsl-qdma.c
+index f383f219ed008..7082a5a6814a4 100644
+--- a/drivers/dma/fsl-qdma.c
++++ b/drivers/dma/fsl-qdma.c
+@@ -109,6 +109,7 @@
+ #define FSL_QDMA_CMD_WTHROTL_OFFSET   20
+ #define FSL_QDMA_CMD_DSEN_OFFSET      19
+ #define FSL_QDMA_CMD_LWC_OFFSET               16
++#define FSL_QDMA_CMD_PF                       BIT(17)
+ 
+ /* Field definition for Descriptor status */
+ #define QDMA_CCDF_STATUS_RTE          BIT(5)
+@@ -384,7 +385,8 @@ static void fsl_qdma_comp_fill_memcpy(struct fsl_qdma_comp *fsl_comp,
+       qdma_csgf_set_f(csgf_dest, len);
+       /* Descriptor Buffer */
+       cmd = cpu_to_le32(FSL_QDMA_CMD_RWTTYPE <<
+-                        FSL_QDMA_CMD_RWTTYPE_OFFSET);
++                        FSL_QDMA_CMD_RWTTYPE_OFFSET) |
++                        FSL_QDMA_CMD_PF;
+       sdf->data = QDMA_SDDF_CMD(cmd);
+ 
+       cmd = cpu_to_le32(FSL_QDMA_CMD_RWTTYPE <<
+@@ -1201,10 +1203,6 @@ static int fsl_qdma_probe(struct platform_device *pdev)
+       if (!fsl_qdma->queue)
+               return -ENOMEM;
+ 
+-      ret = fsl_qdma_irq_init(pdev, fsl_qdma);
+-      if (ret)
+-              return ret;
+-
+       fsl_qdma->irq_base = platform_get_irq_byname(pdev, "qdma-queue0");
+       if (fsl_qdma->irq_base < 0)
+               return fsl_qdma->irq_base;
+@@ -1243,16 +1241,19 @@ static int fsl_qdma_probe(struct platform_device *pdev)
+ 
+       platform_set_drvdata(pdev, fsl_qdma);
+ 
+-      ret = dma_async_device_register(&fsl_qdma->dma_dev);
++      ret = fsl_qdma_reg_init(fsl_qdma);
+       if (ret) {
+-              dev_err(&pdev->dev,
+-                      "Can't register NXP Layerscape qDMA engine.\n");
++              dev_err(&pdev->dev, "Can't Initialize the qDMA engine.\n");
+               return ret;
+       }
+ 
+-      ret = fsl_qdma_reg_init(fsl_qdma);
++      ret = fsl_qdma_irq_init(pdev, fsl_qdma);
++      if (ret)
++              return ret;
++
++      ret = dma_async_device_register(&fsl_qdma->dma_dev);
+       if (ret) {
+-              dev_err(&pdev->dev, "Can't Initialize the qDMA engine.\n");
++              dev_err(&pdev->dev, "Can't register NXP Layerscape qDMA engine.\n");
+               return ret;
+       }
+ 
+diff --git a/drivers/firmware/efi/capsule-loader.c b/drivers/firmware/efi/capsule-loader.c
+index 3e8d4b51a8140..97bafb5f70389 100644
+--- a/drivers/firmware/efi/capsule-loader.c
++++ b/drivers/firmware/efi/capsule-loader.c
+@@ -292,7 +292,7 @@ static int efi_capsule_open(struct inode *inode, struct file *file)
+               return -ENOMEM;
+       }
+ 
+-      cap_info->phys = kzalloc(sizeof(void *), GFP_KERNEL);
++      cap_info->phys = kzalloc(sizeof(phys_addr_t), GFP_KERNEL);
+       if (!cap_info->phys) {
+               kfree(cap_info->pages);
+               kfree(cap_info);
+diff --git a/drivers/gpio/gpio-74x164.c b/drivers/gpio/gpio-74x164.c
+index 05637d5851526..1b50470c4e381 100644
+--- a/drivers/gpio/gpio-74x164.c
++++ b/drivers/gpio/gpio-74x164.c
+@@ -127,8 +127,6 @@ static int gen_74x164_probe(struct spi_device *spi)
+       if (IS_ERR(chip->gpiod_oe))
+               return PTR_ERR(chip->gpiod_oe);
+ 
+-      gpiod_set_value_cansleep(chip->gpiod_oe, 1);
+-
+       spi_set_drvdata(spi, chip);
+ 
+       chip->gpio_chip.label = spi->modalias;
+@@ -153,6 +151,8 @@ static int gen_74x164_probe(struct spi_device *spi)
+               goto exit_destroy;
+       }
+ 
++      gpiod_set_value_cansleep(chip->gpiod_oe, 1);
++
+       ret = gpiochip_add_data(&chip->gpio_chip, chip);
+       if (!ret)
+               return 0;
+diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c
+index d10f621085e2e..374bb9f432660 100644
+--- a/drivers/gpio/gpiolib.c
++++ b/drivers/gpio/gpiolib.c
+@@ -732,11 +732,11 @@ int gpiochip_add_data_with_key(struct gpio_chip *gc, void *data,
+ 
+       ret = gpiochip_irqchip_init_valid_mask(gc);
+       if (ret)
+-              goto err_remove_acpi_chip;
++              goto err_free_hogs;
+ 
+       ret = gpiochip_irqchip_init_hw(gc);
+       if (ret)
+-              goto err_remove_acpi_chip;
++              goto err_remove_irqchip_mask;
+ 
+       ret = gpiochip_add_irqchip(gc, lock_key, request_key);
+       if (ret)
+@@ -761,13 +761,13 @@ int gpiochip_add_data_with_key(struct gpio_chip *gc, void *data,
+       gpiochip_irqchip_remove(gc);
+ err_remove_irqchip_mask:
+       gpiochip_irqchip_free_valid_mask(gc);
+-err_remove_acpi_chip:
++err_free_hogs:
++      gpiochip_free_hogs(gc);
+       acpi_gpiochip_remove(gc);
++      gpiochip_remove_pin_ranges(gc);
+ err_remove_of_chip:
+-      gpiochip_free_hogs(gc);
+       of_gpiochip_remove(gc);
+ err_free_gpiochip_mask:
+-      gpiochip_remove_pin_ranges(gc);
+       gpiochip_free_valid_mask(gc);
+ err_remove_from_list:
+       spin_lock_irqsave(&gpio_lock, flags);
+diff --git a/drivers/mmc/core/mmc.c b/drivers/mmc/core/mmc.c
+index 87807ef010a96..2059cd226cbdc 100644
+--- a/drivers/mmc/core/mmc.c
++++ b/drivers/mmc/core/mmc.c
+@@ -997,10 +997,12 @@ static int mmc_select_bus_width(struct mmc_card *card)
+       static unsigned ext_csd_bits[] = {
+               EXT_CSD_BUS_WIDTH_8,
+               EXT_CSD_BUS_WIDTH_4,
++              EXT_CSD_BUS_WIDTH_1,
+       };
+       static unsigned bus_widths[] = {
+               MMC_BUS_WIDTH_8,
+               MMC_BUS_WIDTH_4,
++              MMC_BUS_WIDTH_1,
+       };
+       struct mmc_host *host = card->host;
+       unsigned idx, bus_width = 0;
+diff --git a/drivers/mmc/host/sdhci-xenon-phy.c b/drivers/mmc/host/sdhci-xenon-phy.c
+index 03ce57ef45858..53bf9870cd71b 100644
+--- a/drivers/mmc/host/sdhci-xenon-phy.c
++++ b/drivers/mmc/host/sdhci-xenon-phy.c
+@@ -11,6 +11,7 @@
+ #include <linux/slab.h>
+ #include <linux/delay.h>
+ #include <linux/ktime.h>
++#include <linux/iopoll.h>
+ #include <linux/of_address.h>
+ 
+ #include "sdhci-pltfm.h"
+@@ -109,6 +110,8 @@
+ #define XENON_EMMC_PHY_LOGIC_TIMING_ADJUST    (XENON_EMMC_PHY_REG_BASE + 0x18)
+ #define XENON_LOGIC_TIMING_VALUE              0x00AA8977
+ 
++#define XENON_MAX_PHY_TIMEOUT_LOOPS           100
++
+ /*
+  * List offset of PHY registers and some special register values
+  * in eMMC PHY 5.0 or eMMC PHY 5.1
+@@ -216,6 +219,19 @@ static int xenon_alloc_emmc_phy(struct sdhci_host *host)
+       return 0;
+ }
+ 
++static int xenon_check_stability_internal_clk(struct sdhci_host *host)
++{
++      u32 reg;
++      int err;
++
++      err = read_poll_timeout(sdhci_readw, reg, reg & SDHCI_CLOCK_INT_STABLE,
++                              1100, 20000, false, host, SDHCI_CLOCK_CONTROL);
++      if (err)
++              dev_err(mmc_dev(host->mmc), "phy_init: Internal clock never stabilized.\n");
++
++      return err;
++}
++
+ /*
+  * eMMC 5.0/5.1 PHY init/re-init.
+  * eMMC PHY init should be executed after:
+@@ -232,6 +248,11 @@ static int xenon_emmc_phy_init(struct sdhci_host *host)
+       struct xenon_priv *priv = sdhci_pltfm_priv(pltfm_host);
+       struct xenon_emmc_phy_regs *phy_regs = priv->emmc_phy_regs;
+ 
++      int ret = xenon_check_stability_internal_clk(host);
++
++      if (ret)
++              return ret;
++
+       reg = sdhci_readl(host, phy_regs->timing_adj);
+       reg |= XENON_PHY_INITIALIZAION;
+       sdhci_writel(host, reg, phy_regs->timing_adj);
+@@ -259,18 +280,27 @@ static int xenon_emmc_phy_init(struct sdhci_host *host)
+       /* get the wait time */
+       wait /= clock;
+       wait++;
+-      /* wait for host eMMC PHY init completes */
+-      udelay(wait);
+ 
+-      reg = sdhci_readl(host, phy_regs->timing_adj);
+-      reg &= XENON_PHY_INITIALIZAION;
+-      if (reg) {
++      /*
++       * AC5X spec says bit must be polled until zero.
++       * We see cases in which timeout can take longer
++       * than the standard calculation on AC5X, which is
++       * expected following the spec comment above.
++       * According to the spec, we must wait as long as
++       * it takes for that bit to toggle on AC5X.
++       * Cap that with 100 delay loops so we won't get
++       * stuck here forever:
++       */
++
++      ret = read_poll_timeout(sdhci_readl, reg,
++                              !(reg & XENON_PHY_INITIALIZAION),
++                              wait, XENON_MAX_PHY_TIMEOUT_LOOPS * wait,
++                              false, host, phy_regs->timing_adj);
++      if (ret)
+              dev_err(mmc_dev(host->mmc), "eMMC PHY init cannot complete after %d us\n",
+-                      wait);
+-              return -ETIMEDOUT;
+-      }
++                      wait * XENON_MAX_PHY_TIMEOUT_LOOPS);
+ 
+-      return 0;
++      return ret;
+ }
+ 
+ #define ARMADA_3700_SOC_PAD_1_8V      0x1
+diff --git a/drivers/mtd/nand/spi/gigadevice.c b/drivers/mtd/nand/spi/gigadevice.c
+index 33c67403c4aa1..56d1b56615f97 100644
+--- a/drivers/mtd/nand/spi/gigadevice.c
++++ b/drivers/mtd/nand/spi/gigadevice.c
+@@ -13,7 +13,10 @@
+ #define GD5FXGQ4XA_STATUS_ECC_1_7_BITFLIPS    (1 << 4)
+ #define GD5FXGQ4XA_STATUS_ECC_8_BITFLIPS      (3 << 4)
+ 
+-#define GD5FXGQ4UEXXG_REG_STATUS2             0xf0
++#define GD5FXGQ5XE_STATUS_ECC_1_4_BITFLIPS    (1 << 4)
++#define GD5FXGQ5XE_STATUS_ECC_4_BITFLIPS      (3 << 4)
++
++#define GD5FXGQXXEXXG_REG_STATUS2             0xf0
+ 
+ #define GD5FXGQ4UXFXXG_STATUS_ECC_MASK                (7 << 4)
+ #define GD5FXGQ4UXFXXG_STATUS_ECC_NO_BITFLIPS (0 << 4)
+@@ -36,6 +39,14 @@ static SPINAND_OP_VARIANTS(read_cache_variants_f,
+               SPINAND_PAGE_READ_FROM_CACHE_OP_3A(true, 0, 1, NULL, 0),
+               SPINAND_PAGE_READ_FROM_CACHE_OP_3A(false, 0, 0, NULL, 0));
+ 
++static SPINAND_OP_VARIANTS(read_cache_variants_1gq5,
++              SPINAND_PAGE_READ_FROM_CACHE_QUADIO_OP(0, 2, NULL, 0),
++              SPINAND_PAGE_READ_FROM_CACHE_X4_OP(0, 1, NULL, 0),
++              SPINAND_PAGE_READ_FROM_CACHE_DUALIO_OP(0, 1, NULL, 0),
++              SPINAND_PAGE_READ_FROM_CACHE_X2_OP(0, 1, NULL, 0),
++              SPINAND_PAGE_READ_FROM_CACHE_OP(true, 0, 1, NULL, 0),
++              SPINAND_PAGE_READ_FROM_CACHE_OP(false, 0, 1, NULL, 0));
++
+ static SPINAND_OP_VARIANTS(write_cache_variants,
+               SPINAND_PROG_LOAD_X4(true, 0, NULL, 0),
+               SPINAND_PROG_LOAD(true, 0, NULL, 0));
+@@ -102,7 +113,7 @@ static int gd5fxgq4xa_ecc_get_status(struct spinand_device *spinand,
+       return -EINVAL;
+ }
+ 
+-static int gd5fxgq4_variant2_ooblayout_ecc(struct mtd_info *mtd, int section,
++static int gd5fxgqx_variant2_ooblayout_ecc(struct mtd_info *mtd, int section,
+                                      struct mtd_oob_region *region)
+ {
+       if (section)
+@@ -114,7 +125,7 @@ static int gd5fxgq4_variant2_ooblayout_ecc(struct mtd_info *mtd, int section,
+       return 0;
+ }
+ 
+-static int gd5fxgq4_variant2_ooblayout_free(struct mtd_info *mtd, int section,
++static int gd5fxgqx_variant2_ooblayout_free(struct mtd_info *mtd, int section,
+                                       struct mtd_oob_region *region)
+ {
+       if (section)
+@@ -127,9 +138,10 @@ static int gd5fxgq4_variant2_ooblayout_free(struct mtd_info *mtd, int section,
+       return 0;
+ }
+ 
+-static const struct mtd_ooblayout_ops gd5fxgq4_variant2_ooblayout = {
+-      .ecc = gd5fxgq4_variant2_ooblayout_ecc,
+-      .free = gd5fxgq4_variant2_ooblayout_free,
++/* Valid for Q4/Q5 and Q6 (untested) devices */
++static const struct mtd_ooblayout_ops gd5fxgqx_variant2_ooblayout = {
++      .ecc = gd5fxgqx_variant2_ooblayout_ecc,
++      .free = gd5fxgqx_variant2_ooblayout_free,
+ };
+ 
+ static int gd5fxgq4xc_ooblayout_256_ecc(struct mtd_info *mtd, int section,
+@@ -165,8 +177,8 @@ static int gd5fxgq4uexxg_ecc_get_status(struct spinand_device *spinand,
+                                       u8 status)
+ {
+       u8 status2;
+-      struct spi_mem_op op = SPINAND_GET_FEATURE_OP(GD5FXGQ4UEXXG_REG_STATUS2,
+-                                                    &status2);
++      struct spi_mem_op op = SPINAND_GET_FEATURE_OP(GD5FXGQXXEXXG_REG_STATUS2,
++                                                    spinand->scratchbuf);
+       int ret;
+ 
+       switch (status & STATUS_ECC_MASK) {
+@@ -187,6 +199,7 @@ static int gd5fxgq4uexxg_ecc_get_status(struct spinand_device *spinand,
+                * report the maximum of 4 in this case
+                */
+               /* bits sorted this way (3...0): ECCS1,ECCS0,ECCSE1,ECCSE0 */
++              status2 = *(spinand->scratchbuf);
+               return ((status & STATUS_ECC_MASK) >> 2) |
+                       ((status2 & STATUS_ECC_MASK) >> 4);
+ 
+@@ -203,6 +216,44 @@ static int gd5fxgq4uexxg_ecc_get_status(struct spinand_device *spinand,
+       return -EINVAL;
+ }
+ 
++static int gd5fxgq5xexxg_ecc_get_status(struct spinand_device *spinand,
++                                      u8 status)
++{
++      u8 status2;
++      struct spi_mem_op op = SPINAND_GET_FEATURE_OP(GD5FXGQXXEXXG_REG_STATUS2,
++                                                    spinand->scratchbuf);
++      int ret;
++
++      switch (status & STATUS_ECC_MASK) {
++      case STATUS_ECC_NO_BITFLIPS:
++              return 0;
++
++      case GD5FXGQ5XE_STATUS_ECC_1_4_BITFLIPS:
++              /*
++               * Read status2 register to determine a more fine grained
++               * bit error status
++               */
++              ret = spi_mem_exec_op(spinand->spimem, &op);
++              if (ret)
++                      return ret;
++
++              /*
++               * 1 ... 4 bits are flipped (and corrected)
++               */
++              /* bits sorted this way (1...0): ECCSE1, ECCSE0 */
++              status2 = *(spinand->scratchbuf);
++              return ((status2 & STATUS_ECC_MASK) >> 4) + 1;
++
++      case STATUS_ECC_UNCOR_ERROR:
++              return -EBADMSG;
++
++      default:
++              break;
++      }
++
++      return -EINVAL;
++}
++
+ static int gd5fxgq4ufxxg_ecc_get_status(struct spinand_device *spinand,
+                                       u8 status)
+ {
+@@ -282,7 +333,7 @@ static const struct spinand_info gigadevice_spinand_table[] = {
+                                             &write_cache_variants,
+                                             &update_cache_variants),
+                    SPINAND_HAS_QE_BIT,
+-                   SPINAND_ECCINFO(&gd5fxgq4_variant2_ooblayout,
++                   SPINAND_ECCINFO(&gd5fxgqx_variant2_ooblayout,
+                                    gd5fxgq4uexxg_ecc_get_status)),
+       SPINAND_INFO("GD5F1GQ4UFxxG",
+                    SPINAND_ID(SPINAND_READID_METHOD_OPCODE, 0xb1, 0x48),
+@@ -292,8 +343,18 @@ static const struct spinand_info gigadevice_spinand_table[] = {
+                                             &write_cache_variants,
+                                             &update_cache_variants),
+                    SPINAND_HAS_QE_BIT,
+-                   SPINAND_ECCINFO(&gd5fxgq4_variant2_ooblayout,
++                   SPINAND_ECCINFO(&gd5fxgqx_variant2_ooblayout,
+                                    gd5fxgq4ufxxg_ecc_get_status)),
++      SPINAND_INFO("GD5F1GQ5UExxG",
++                   SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x51),
++                   NAND_MEMORG(1, 2048, 128, 64, 1024, 20, 1, 1, 1),
++                   NAND_ECCREQ(4, 512),
++                   SPINAND_INFO_OP_VARIANTS(&read_cache_variants_1gq5,
++                                            &write_cache_variants,
++                                            &update_cache_variants),
++                   SPINAND_HAS_QE_BIT,
++                   SPINAND_ECCINFO(&gd5fxgqx_variant2_ooblayout,
++                                   gd5fxgq5xexxg_ecc_get_status)),
+ };
+ 
+ static const struct spinand_manufacturer_ops gigadevice_spinand_manuf_ops = {
+diff --git a/drivers/net/gtp.c b/drivers/net/gtp.c
+index 9534f58368ccb..4e19760cddefe 100644
+--- a/drivers/net/gtp.c
++++ b/drivers/net/gtp.c
+@@ -1406,26 +1406,26 @@ static int __init gtp_init(void)
+ 
+       get_random_bytes(&gtp_h_initval, sizeof(gtp_h_initval));
+ 
+-      err = rtnl_link_register(&gtp_link_ops);
++      err = register_pernet_subsys(&gtp_net_ops);
+       if (err < 0)
+               goto error_out;
+ 
+-      err = register_pernet_subsys(&gtp_net_ops);
++      err = rtnl_link_register(&gtp_link_ops);
+       if (err < 0)
+-              goto unreg_rtnl_link;
++              goto unreg_pernet_subsys;
+ 
+       err = genl_register_family(&gtp_genl_family);
+       if (err < 0)
+-              goto unreg_pernet_subsys;
++              goto unreg_rtnl_link;
+ 
+       pr_info("GTP module loaded (pdp ctx size %zd bytes)\n",
+               sizeof(struct pdp_ctx));
+       return 0;
+ 
+-unreg_pernet_subsys:
+-      unregister_pernet_subsys(&gtp_net_ops);
+ unreg_rtnl_link:
+       rtnl_link_unregister(&gtp_link_ops);
++unreg_pernet_subsys:
++      unregister_pernet_subsys(&gtp_net_ops);
+ error_out:
+       pr_err("error loading GTP module loaded\n");
+       return err;
+diff --git a/drivers/net/tun.c b/drivers/net/tun.c
+index 0b25b59f44033..bb0368272a1bb 100644
+--- a/drivers/net/tun.c
++++ b/drivers/net/tun.c
+@@ -665,6 +665,7 @@ static void __tun_detach(struct tun_file *tfile, bool clean)
+                                  tun->tfiles[tun->numqueues - 1]);
+               ntfile = rtnl_dereference(tun->tfiles[index]);
+               ntfile->queue_index = index;
++              ntfile->xdp_rxq.queue_index = index;
+               rcu_assign_pointer(tun->tfiles[tun->numqueues - 1],
+                                  NULL);
+ 
+diff --git a/drivers/net/usb/dm9601.c b/drivers/net/usb/dm9601.c
+index 5aad26600b03e..9b7db5fd9e08f 100644
+--- a/drivers/net/usb/dm9601.c
++++ b/drivers/net/usb/dm9601.c
+@@ -231,7 +231,7 @@ static int dm9601_mdio_read(struct net_device *netdev, int phy_id, int loc)
+       err = dm_read_shared_word(dev, 1, loc, &res);
+       if (err < 0) {
+               netdev_err(dev->net, "MDIO read error: %d\n", err);
+-              return err;
++              return 0;
+       }
+ 
+       netdev_dbg(dev->net,
+diff --git a/drivers/net/usb/lan78xx.c b/drivers/net/usb/lan78xx.c
+index 667984efeb3be..c5a666bb86ee4 100644
+--- a/drivers/net/usb/lan78xx.c
++++ b/drivers/net/usb/lan78xx.c
+@@ -2527,7 +2527,8 @@ static int lan78xx_reset(struct lan78xx_net *dev)
+       if (dev->chipid == ID_REV_CHIP_ID_7801_)
+               buf &= ~MAC_CR_GMII_EN_;
+ 
+-      if (dev->chipid == ID_REV_CHIP_ID_7800_) {
++      if (dev->chipid == ID_REV_CHIP_ID_7800_ ||
++          dev->chipid == ID_REV_CHIP_ID_7850_) {
+               ret = lan78xx_read_raw_eeprom(dev, 0, 1, &sig);
+               if (!ret && sig != EEPROM_INDICATOR) {
+                       /* Implies there is no external eeprom. Set mac speed */
+diff --git a/drivers/platform/x86/touchscreen_dmi.c b/drivers/platform/x86/touchscreen_dmi.c
+index eedff2ae28511..ebe959db1eeb9 100644
+--- a/drivers/platform/x86/touchscreen_dmi.c
++++ b/drivers/platform/x86/touchscreen_dmi.c
+@@ -50,7 +50,7 @@ static const struct property_entry chuwi_hi8_air_props[] = {
+ };
+ 
+ static const struct ts_dmi_data chuwi_hi8_air_data = {
+-      .acpi_name      = "MSSL1680:00",
++      .acpi_name      = "MSSL1680",
+       .properties     = chuwi_hi8_air_props,
+ };
+ 
+@@ -1648,7 +1648,7 @@ static void ts_dmi_add_props(struct i2c_client *client)
+       int error;
+ 
+       if (has_acpi_companion(dev) &&
+-          !strncmp(ts_data->acpi_name, client->name, I2C_NAME_SIZE)) {
++          strstarts(client->name, ts_data->acpi_name)) {
+               error = device_add_properties(dev, ts_data->properties);
+               if (error)
+                       dev_err(dev, "failed to add properties: %d\n", error);
+diff --git a/drivers/power/supply/bq27xxx_battery_i2c.c b/drivers/power/supply/bq27xxx_battery_i2c.c
+index 0e32efb10ee78..6fbae8fc2e501 100644
+--- a/drivers/power/supply/bq27xxx_battery_i2c.c
++++ b/drivers/power/supply/bq27xxx_battery_i2c.c
+@@ -209,7 +209,9 @@ static int bq27xxx_battery_i2c_remove(struct i2c_client *client)
+ {
+       struct bq27xxx_device_info *di = i2c_get_clientdata(client);
+ 
+-      free_irq(client->irq, di);
++      if (client->irq)
++              free_irq(client->irq, di);
++
+       bq27xxx_battery_teardown(di);
+ 
+       mutex_lock(&battery_mutex);
+diff --git a/drivers/soc/qcom/rpmhpd.c b/drivers/soc/qcom/rpmhpd.c
+index 436ec79122ed2..d228e77175c7f 100644
+--- a/drivers/soc/qcom/rpmhpd.c
++++ b/drivers/soc/qcom/rpmhpd.c
+@@ -261,12 +261,15 @@ static int rpmhpd_aggregate_corner(struct rpmhpd *pd, unsigned int corner)
+       unsigned int active_corner, sleep_corner;
+       unsigned int this_active_corner = 0, this_sleep_corner = 0;
+       unsigned int peer_active_corner = 0, peer_sleep_corner = 0;
++      unsigned int peer_enabled_corner;
+ 
+       to_active_sleep(pd, corner, &this_active_corner, &this_sleep_corner);
+ 
+-      if (peer && peer->enabled)
+-              to_active_sleep(peer, peer->corner, &peer_active_corner,
++      if (peer && peer->enabled) {
++              peer_enabled_corner = max(peer->corner, peer->enable_corner);
++              to_active_sleep(peer, peer_enabled_corner, &peer_active_corner,
+                               &peer_sleep_corner);
++      }
+ 
+       active_corner = max(this_active_corner, peer_active_corner);
+ 
+diff --git a/fs/afs/dir.c b/fs/afs/dir.c
+index 0b927736ca728..88f0e719c6ac0 100644
+--- a/fs/afs/dir.c
++++ b/fs/afs/dir.c
+@@ -423,8 +423,10 @@ static int afs_dir_iterate_block(struct afs_vnode *dvnode,
+                   dire->u.name[0] == '.' &&
+                   ctx->actor != afs_lookup_filldir &&
+                   ctx->actor != afs_lookup_one_filldir &&
+-                  memcmp(dire->u.name, ".__afs", 6) == 0)
++                  memcmp(dire->u.name, ".__afs", 6) == 0) {
++                      ctx->pos = blkoff + next * sizeof(union afs_xdr_dirent);
+                       continue;
++              }
+ 
+               /* found the next entry */
+               if (!dir_emit(ctx, dire->u.name, nlen,
+diff --git a/fs/btrfs/dev-replace.c b/fs/btrfs/dev-replace.c
+index be6935d191970..9a6055659c1a6 100644
+--- a/fs/btrfs/dev-replace.c
++++ b/fs/btrfs/dev-replace.c
+@@ -566,6 +566,23 @@ static int btrfs_dev_replace_start(struct btrfs_fs_info 
*fs_info,
+       return ret;
+ }
+ 
++static int btrfs_check_replace_dev_names(struct btrfs_ioctl_dev_replace_args 
*args)
++{
++      if (args->start.srcdevid == 0) {
++              if (memchr(args->start.srcdev_name, 0,
++                         sizeof(args->start.srcdev_name)) == NULL)
++                      return -ENAMETOOLONG;
++      } else {
++              args->start.srcdev_name[0] = 0;
++      }
++
++      if (memchr(args->start.tgtdev_name, 0,
++                 sizeof(args->start.tgtdev_name)) == NULL)
++          return -ENAMETOOLONG;
++
++      return 0;
++}
++
+ int btrfs_dev_replace_by_ioctl(struct btrfs_fs_info *fs_info,
+                           struct btrfs_ioctl_dev_replace_args *args)
+ {
+@@ -578,10 +595,9 @@ int btrfs_dev_replace_by_ioctl(struct btrfs_fs_info 
*fs_info,
+       default:
+               return -EINVAL;
+       }
+-
+-      if ((args->start.srcdevid == 0 && args->start.srcdev_name[0] == '\0') ||
+-          args->start.tgtdev_name[0] == '\0')
+-              return -EINVAL;
++      ret = btrfs_check_replace_dev_names(args);
++      if (ret < 0)
++              return ret;
+ 
+       ret = btrfs_dev_replace_start(fs_info, args->start.tgtdev_name,
+                                       args->start.srcdevid,
+diff --git a/fs/cachefiles/bind.c b/fs/cachefiles/bind.c
+index dfb14dbddf51d..3b39552c23651 100644
+--- a/fs/cachefiles/bind.c
++++ b/fs/cachefiles/bind.c
+@@ -245,6 +245,8 @@ static int cachefiles_daemon_add_cache(struct 
cachefiles_cache *cache)
+       kmem_cache_free(cachefiles_object_jar, fsdef);
+ error_root_object:
+       cachefiles_end_secure(cache, saved_cred);
++      put_cred(cache->cache_cred);
++      cache->cache_cred = NULL;
+       pr_err("Failed to register: %d\n", ret);
+       return ret;
+ }
+@@ -265,6 +267,7 @@ void cachefiles_daemon_unbind(struct cachefiles_cache 
*cache)
+ 
+       dput(cache->graveyard);
+       mntput(cache->mnt);
++      put_cred(cache->cache_cred);
+ 
+       kfree(cache->rootdirname);
+       kfree(cache->secctx);
+diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
+index d66ba6f6a8115..61988b7b5be77 100644
+--- a/fs/ext4/mballoc.c
++++ b/fs/ext4/mballoc.c
+@@ -1494,11 +1494,6 @@ static void mb_free_blocks(struct inode *inode, struct 
ext4_buddy *e4b,
+       mb_check_buddy(e4b);
+       mb_free_blocks_double(inode, e4b, first, count);
+ 
+-      this_cpu_inc(discard_pa_seq);
+-      e4b->bd_info->bb_free += count;
+-      if (first < e4b->bd_info->bb_first_free)
+-              e4b->bd_info->bb_first_free = first;
+-
+       /* access memory sequentially: check left neighbour,
+        * clear range and then check right neighbour
+        */
+@@ -1512,23 +1507,31 @@ static void mb_free_blocks(struct inode *inode, struct 
ext4_buddy *e4b,
+               struct ext4_sb_info *sbi = EXT4_SB(sb);
+               ext4_fsblk_t blocknr;
+ 
++              /*
++               * Fastcommit replay can free already freed blocks which
++               * corrupts allocation info. Regenerate it.
++               */
++              if (sbi->s_mount_state & EXT4_FC_REPLAY) {
++                      mb_regenerate_buddy(e4b);
++                      goto check;
++              }
++
+               blocknr = ext4_group_first_block_no(sb, e4b->bd_group);
+               blocknr += EXT4_C2B(sbi, block);
+-              if (!(sbi->s_mount_state & EXT4_FC_REPLAY)) {
+-                      ext4_grp_locked_error(sb, e4b->bd_group,
+-                                            inode ? inode->i_ino : 0,
+-                                            blocknr,
+-                                            "freeing already freed block (bit 
%u); block bitmap corrupt.",
+-                                            block);
+-                      ext4_mark_group_bitmap_corrupted(
+-                              sb, e4b->bd_group,
++              ext4_grp_locked_error(sb, e4b->bd_group,
++                                    inode ? inode->i_ino : 0, blocknr,
++                                    "freeing already freed block (bit %u); 
block bitmap corrupt.",
++                                    block);
++              ext4_mark_group_bitmap_corrupted(sb, e4b->bd_group,
+                               EXT4_GROUP_INFO_BBITMAP_CORRUPT);
+-              } else {
+-                      mb_regenerate_buddy(e4b);
+-              }
+-              goto done;
++              return;
+       }
+ 
++      this_cpu_inc(discard_pa_seq);
++      e4b->bd_info->bb_free += count;
++      if (first < e4b->bd_info->bb_first_free)
++              e4b->bd_info->bb_first_free = first;
++
+       /* let's maintain fragments counter */
+       if (left_is_free && right_is_free)
+               e4b->bd_info->bb_fragments--;
+@@ -1553,8 +1556,8 @@ static void mb_free_blocks(struct inode *inode, struct 
ext4_buddy *e4b,
+       if (first <= last)
+               mb_buddy_mark_free(e4b, first >> 1, last >> 1);
+ 
+-done:
+       mb_set_largest_free_order(sb, e4b->bd_info);
++check:
+       mb_check_buddy(e4b);
+ }
+ 
+diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c
+index 5181e6d4e18ca..a0edd4b8fa189 100644
+--- a/fs/hugetlbfs/inode.c
++++ b/fs/hugetlbfs/inode.c
+@@ -1234,6 +1234,7 @@ static int hugetlbfs_parse_param(struct fs_context *fc, 
struct fs_parameter *par
+ {
+       struct hugetlbfs_fs_context *ctx = fc->fs_private;
+       struct fs_parse_result result;
++      struct hstate *h;
+       char *rest;
+       unsigned long ps;
+       int opt;
+@@ -1278,11 +1279,12 @@ static int hugetlbfs_parse_param(struct fs_context 
*fc, struct fs_parameter *par
+ 
+       case Opt_pagesize:
+               ps = memparse(param->string, &rest);
+-              ctx->hstate = size_to_hstate(ps);
+-              if (!ctx->hstate) {
++              h = size_to_hstate(ps);
++              if (!h) {
+                       pr_err("Unsupported page size %lu MB\n", ps >> 20);
+                       return -EINVAL;
+               }
++              ctx->hstate = h;
+               return 0;
+ 
+       case Opt_min_size:
+diff --git a/net/bluetooth/hci_core.c b/net/bluetooth/hci_core.c
+index e33fe4b1c4e29..5f1fbf86e0ceb 100644
+--- a/net/bluetooth/hci_core.c
++++ b/net/bluetooth/hci_core.c
+@@ -2318,6 +2318,7 @@ static void hci_error_reset(struct work_struct *work)
+ {
+       struct hci_dev *hdev = container_of(work, struct hci_dev, error_reset);
+ 
++      hci_dev_hold(hdev);
+       BT_DBG("%s", hdev->name);
+ 
+       if (hdev->hw_error)
+@@ -2325,10 +2326,10 @@ static void hci_error_reset(struct work_struct *work)
+       else
+               bt_dev_err(hdev, "hardware error 0x%2.2x", hdev->hw_error_code);
+ 
+-      if (hci_dev_do_close(hdev))
+-              return;
++      if (!hci_dev_do_close(hdev))
++              hci_dev_do_open(hdev);
+ 
+-      hci_dev_do_open(hdev);
++      hci_dev_put(hdev);
+ }
+ 
+ void hci_uuids_clear(struct hci_dev *hdev)
+diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c
+index 4027c79786fd3..47f37080c0c55 100644
+--- a/net/bluetooth/hci_event.c
++++ b/net/bluetooth/hci_event.c
+@@ -4612,9 +4612,12 @@ static void hci_io_capa_request_evt(struct hci_dev 
*hdev, struct sk_buff *skb)
+       hci_dev_lock(hdev);
+ 
+       conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
+-      if (!conn || !hci_conn_ssp_enabled(conn))
++      if (!conn || !hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
+               goto unlock;
+ 
++      /* Assume remote supports SSP since it has triggered this event */
++      set_bit(HCI_CONN_SSP_ENABLED, &conn->flags);
++
+       hci_conn_hold(conn);
+ 
+       if (!hci_dev_test_flag(hdev, HCI_MGMT))
+@@ -5922,6 +5925,10 @@ static void hci_le_remote_conn_param_req_evt(struct 
hci_dev *hdev,
+               return send_conn_param_neg_reply(hdev, handle,
+                                                HCI_ERROR_UNKNOWN_CONN_ID);
+ 
++      if (max > hcon->le_conn_max_interval)
++              return send_conn_param_neg_reply(hdev, handle,
++                                               HCI_ERROR_INVALID_LL_PARAMS);
++
+       if (hci_check_conn_params(min, max, latency, timeout))
+               return send_conn_param_neg_reply(hdev, handle,
+                                                HCI_ERROR_INVALID_LL_PARAMS);
+@@ -6139,10 +6146,10 @@ static void hci_store_wake_reason(struct hci_dev 
*hdev, u8 event,
+        * keep track of the bdaddr of the connection event that woke us up.
+        */
+       if (event == HCI_EV_CONN_REQUEST) {
+-              bacpy(&hdev->wake_addr, &conn_complete->bdaddr);
++              bacpy(&hdev->wake_addr, &conn_request->bdaddr);
+               hdev->wake_addr_type = BDADDR_BREDR;
+       } else if (event == HCI_EV_CONN_COMPLETE) {
+-              bacpy(&hdev->wake_addr, &conn_request->bdaddr);
++              bacpy(&hdev->wake_addr, &conn_complete->bdaddr);
+               hdev->wake_addr_type = BDADDR_BREDR;
+       } else if (event == HCI_EV_LE_META) {
+               struct hci_ev_le_meta *le_ev = (void *)skb->data;
+diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c
+index a752032e12fcf..580b6d6b970d2 100644
+--- a/net/bluetooth/l2cap_core.c
++++ b/net/bluetooth/l2cap_core.c
+@@ -5609,7 +5609,13 @@ static inline int l2cap_conn_param_update_req(struct 
l2cap_conn *conn,
+ 
+       memset(&rsp, 0, sizeof(rsp));
+ 
+-      err = hci_check_conn_params(min, max, latency, to_multiplier);
++      if (max > hcon->le_conn_max_interval) {
++              BT_DBG("requested connection interval exceeds current bounds.");
++              err = -EINVAL;
++      } else {
++              err = hci_check_conn_params(min, max, latency, to_multiplier);
++      }
++
+       if (err)
+               rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_REJECTED);
+       else
+diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
+index 021dcfdae2835..8938320f7ba3b 100644
+--- a/net/core/rtnetlink.c
++++ b/net/core/rtnetlink.c
+@@ -4903,10 +4903,9 @@ static int rtnl_bridge_setlink(struct sk_buff *skb, 
struct nlmsghdr *nlh,
+       struct net *net = sock_net(skb->sk);
+       struct ifinfomsg *ifm;
+       struct net_device *dev;
+-      struct nlattr *br_spec, *attr = NULL;
++      struct nlattr *br_spec, *attr, *br_flags_attr = NULL;
+       int rem, err = -EOPNOTSUPP;
+       u16 flags = 0;
+-      bool have_flags = false;
+ 
+       if (nlmsg_len(nlh) < sizeof(*ifm))
+               return -EINVAL;
+@@ -4924,11 +4923,11 @@ static int rtnl_bridge_setlink(struct sk_buff *skb, 
struct nlmsghdr *nlh,
+       br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
+       if (br_spec) {
+               nla_for_each_nested(attr, br_spec, rem) {
+-                      if (nla_type(attr) == IFLA_BRIDGE_FLAGS && !have_flags) 
{
++                      if (nla_type(attr) == IFLA_BRIDGE_FLAGS && 
!br_flags_attr) {
+                               if (nla_len(attr) < sizeof(flags))
+                                       return -EINVAL;
+ 
+-                              have_flags = true;
++                              br_flags_attr = attr;
+                               flags = nla_get_u16(attr);
+                       }
+ 
+@@ -4972,8 +4971,8 @@ static int rtnl_bridge_setlink(struct sk_buff *skb, 
struct nlmsghdr *nlh,
+               }
+       }
+ 
+-      if (have_flags)
+-              memcpy(nla_data(attr), &flags, sizeof(flags));
++      if (br_flags_attr)
++              memcpy(nla_data(br_flags_attr), &flags, sizeof(flags));
+ out:
+       return err;
+ }
+diff --git a/net/ipv4/ip_tunnel.c b/net/ipv4/ip_tunnel.c
+index 99f70b990eb13..50f8231e9daec 100644
+--- a/net/ipv4/ip_tunnel.c
++++ b/net/ipv4/ip_tunnel.c
+@@ -540,6 +540,20 @@ static int tnl_update_pmtu(struct net_device *dev, struct 
sk_buff *skb,
+       return 0;
+ }
+ 
++static void ip_tunnel_adj_headroom(struct net_device *dev, unsigned int 
headroom)
++{
++      /* we must cap headroom to some upperlimit, else pskb_expand_head
++       * will overflow header offsets in skb_headers_offset_update().
++       */
++      static const unsigned int max_allowed = 512;
++
++      if (headroom > max_allowed)
++              headroom = max_allowed;
++
++      if (headroom > READ_ONCE(dev->needed_headroom))
++              WRITE_ONCE(dev->needed_headroom, headroom);
++}
++
+ void ip_md_tunnel_xmit(struct sk_buff *skb, struct net_device *dev,
+                      u8 proto, int tunnel_hlen)
+ {
+@@ -613,13 +627,13 @@ void ip_md_tunnel_xmit(struct sk_buff *skb, struct 
net_device *dev,
+       }
+ 
+       headroom += LL_RESERVED_SPACE(rt->dst.dev) + rt->dst.header_len;
+-      if (headroom > READ_ONCE(dev->needed_headroom))
+-              WRITE_ONCE(dev->needed_headroom, headroom);
+-
+-      if (skb_cow_head(skb, READ_ONCE(dev->needed_headroom))) {
++      if (skb_cow_head(skb, headroom)) {
+               ip_rt_put(rt);
+               goto tx_dropped;
+       }
++
++      ip_tunnel_adj_headroom(dev, headroom);
++
+       iptunnel_xmit(NULL, rt, skb, fl4.saddr, fl4.daddr, proto, tos, ttl,
+                     df, !net_eq(tunnel->net, dev_net(dev)));
+       return;
+@@ -797,16 +811,16 @@ void ip_tunnel_xmit(struct sk_buff *skb, struct 
net_device *dev,
+ 
+       max_headroom = LL_RESERVED_SPACE(rt->dst.dev) + sizeof(struct iphdr)
+                       + rt->dst.header_len + ip_encap_hlen(&tunnel->encap);
+-      if (max_headroom > READ_ONCE(dev->needed_headroom))
+-              WRITE_ONCE(dev->needed_headroom, max_headroom);
+ 
+-      if (skb_cow_head(skb, READ_ONCE(dev->needed_headroom))) {
++      if (skb_cow_head(skb, max_headroom)) {
+               ip_rt_put(rt);
+               dev->stats.tx_dropped++;
+               kfree_skb(skb);
+               return;
+       }
+ 
++      ip_tunnel_adj_headroom(dev, max_headroom);
++
+       iptunnel_xmit(NULL, rt, skb, fl4.saddr, fl4.daddr, protocol, tos, ttl,
+                     df, !net_eq(tunnel->net, dev_net(dev)));
+       return;
+diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
+index 150c2f71ec89f..0429c1d50fc92 100644
+--- a/net/ipv6/addrconf.c
++++ b/net/ipv6/addrconf.c
+@@ -5436,9 +5436,10 @@ static int inet6_rtm_getaddr(struct sk_buff *in_skb, 
struct nlmsghdr *nlh,
+       }
+ 
+       addr = extract_addr(tb[IFA_ADDRESS], tb[IFA_LOCAL], &peer);
+-      if (!addr)
+-              return -EINVAL;
+-
++      if (!addr) {
++              err = -EINVAL;
++              goto errout;
++      }
+       ifm = nlmsg_data(nlh);
+       if (ifm->ifa_index)
+               dev = dev_get_by_index(tgt_net, ifm->ifa_index);
+diff --git a/net/mptcp/diag.c b/net/mptcp/diag.c
+index e57c5f47f0351..d7ca71c597545 100644
+--- a/net/mptcp/diag.c
++++ b/net/mptcp/diag.c
+@@ -21,6 +21,9 @@ static int subflow_get_info(struct sock *sk, struct sk_buff 
*skb)
+       bool slow;
+       int err;
+ 
++      if (inet_sk_state_load(sk) == TCP_LISTEN)
++              return 0;
++
+       start = nla_nest_start_noflag(skb, INET_ULP_INFO_MPTCP);
+       if (!start)
+               return -EMSGSIZE;
+diff --git a/net/mptcp/protocol.c b/net/mptcp/protocol.c
+index 72d944e6a641f..adbe6350f980b 100644
+--- a/net/mptcp/protocol.c
++++ b/net/mptcp/protocol.c
+@@ -2052,8 +2052,50 @@ static struct ipv6_pinfo *mptcp_inet6_sk(const struct 
sock *sk)
+ 
+       return (struct ipv6_pinfo *)(((u8 *)sk) + offset);
+ }
++
++static void mptcp_copy_ip6_options(struct sock *newsk, const struct sock *sk)
++{
++      const struct ipv6_pinfo *np = inet6_sk(sk);
++      struct ipv6_txoptions *opt;
++      struct ipv6_pinfo *newnp;
++
++      newnp = inet6_sk(newsk);
++
++      rcu_read_lock();
++      opt = rcu_dereference(np->opt);
++      if (opt) {
++              opt = ipv6_dup_options(newsk, opt);
++              if (!opt)
++                      net_warn_ratelimited("%s: Failed to copy ip6 
options\n", __func__);
++      }
++      RCU_INIT_POINTER(newnp->opt, opt);
++      rcu_read_unlock();
++}
+ #endif
+ 
++static void mptcp_copy_ip_options(struct sock *newsk, const struct sock *sk)
++{
++      struct ip_options_rcu *inet_opt, *newopt = NULL;
++      const struct inet_sock *inet = inet_sk(sk);
++      struct inet_sock *newinet;
++
++      newinet = inet_sk(newsk);
++
++      rcu_read_lock();
++      inet_opt = rcu_dereference(inet->inet_opt);
++      if (inet_opt) {
++              newopt = sock_kmalloc(newsk, sizeof(*inet_opt) +
++                                    inet_opt->opt.optlen, GFP_ATOMIC);
++              if (newopt)
++                      memcpy(newopt, inet_opt, sizeof(*inet_opt) +
++                             inet_opt->opt.optlen);
++              else
++                      net_warn_ratelimited("%s: Failed to copy ip options\n", 
__func__);
++      }
++      RCU_INIT_POINTER(newinet->inet_opt, newopt);
++      rcu_read_unlock();
++}
++
+ struct sock *mptcp_sk_clone(const struct sock *sk,
+                           const struct mptcp_options_received *mp_opt,
+                           struct request_sock *req)
+@@ -2073,6 +2115,13 @@ struct sock *mptcp_sk_clone(const struct sock *sk,
+ 
+       __mptcp_init_sock(nsk);
+ 
++#if IS_ENABLED(CONFIG_MPTCP_IPV6)
++      if (nsk->sk_family == AF_INET6)
++              mptcp_copy_ip6_options(nsk, sk);
++      else
++#endif
++              mptcp_copy_ip_options(nsk, sk);
++
+       msk = mptcp_sk(nsk);
+       msk->local_key = subflow_req->local_key;
+       msk->token = subflow_req->token;
+diff --git a/net/netfilter/nft_compat.c b/net/netfilter/nft_compat.c
+index 77c7362a7db8e..3e0a6e7930c6c 100644
+--- a/net/netfilter/nft_compat.c
++++ b/net/netfilter/nft_compat.c
+@@ -336,10 +336,20 @@ static int nft_target_validate(const struct nft_ctx *ctx,
+ 
+       if (ctx->family != NFPROTO_IPV4 &&
+           ctx->family != NFPROTO_IPV6 &&
++          ctx->family != NFPROTO_INET &&
+           ctx->family != NFPROTO_BRIDGE &&
+           ctx->family != NFPROTO_ARP)
+               return -EOPNOTSUPP;
+ 
++      ret = nft_chain_validate_hooks(ctx->chain,
++                                     (1 << NF_INET_PRE_ROUTING) |
++                                     (1 << NF_INET_LOCAL_IN) |
++                                     (1 << NF_INET_FORWARD) |
++                                     (1 << NF_INET_LOCAL_OUT) |
++                                     (1 << NF_INET_POST_ROUTING));
++      if (ret)
++              return ret;
++
+       if (nft_is_base_chain(ctx->chain)) {
+               const struct nft_base_chain *basechain =
+                                               nft_base_chain(ctx->chain);
+@@ -584,10 +594,20 @@ static int nft_match_validate(const struct nft_ctx *ctx,
+ 
+       if (ctx->family != NFPROTO_IPV4 &&
+           ctx->family != NFPROTO_IPV6 &&
++          ctx->family != NFPROTO_INET &&
+           ctx->family != NFPROTO_BRIDGE &&
+           ctx->family != NFPROTO_ARP)
+               return -EOPNOTSUPP;
+ 
++      ret = nft_chain_validate_hooks(ctx->chain,
++                                     (1 << NF_INET_PRE_ROUTING) |
++                                     (1 << NF_INET_LOCAL_IN) |
++                                     (1 << NF_INET_FORWARD) |
++                                     (1 << NF_INET_LOCAL_OUT) |
++                                     (1 << NF_INET_POST_ROUTING));
++      if (ret)
++              return ret;
++
+       if (nft_is_base_chain(ctx->chain)) {
+               const struct nft_base_chain *basechain =
+                                               nft_base_chain(ctx->chain);
+diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
+index 359f07a53eccf..a2b14434d7aa0 100644
+--- a/net/netlink/af_netlink.c
++++ b/net/netlink/af_netlink.c
+@@ -157,7 +157,7 @@ static inline u32 netlink_group_mask(u32 group)
+ static struct sk_buff *netlink_to_full_skb(const struct sk_buff *skb,
+                                          gfp_t gfp_mask)
+ {
+-      unsigned int len = skb_end_offset(skb);
++      unsigned int len = skb->len;
+       struct sk_buff *new;
+ 
+       new = alloc_skb(len, gfp_mask);
+diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
+index 279f4977e2eed..933591f9704b8 100644
+--- a/net/wireless/nl80211.c
++++ b/net/wireless/nl80211.c
+@@ -3772,6 +3772,8 @@ static int nl80211_set_interface(struct sk_buff *skb, 
struct genl_info *info)
+ 
+               if (ntype != NL80211_IFTYPE_MESH_POINT)
+                       return -EINVAL;
++              if (otype != NL80211_IFTYPE_MESH_POINT)
++                      return -EINVAL;
+               if (netif_running(dev))
+                       return -EBUSY;
+ 
+diff --git a/security/tomoyo/common.c b/security/tomoyo/common.c
+index 4bee32bfe16d1..6235c3be832aa 100644
+--- a/security/tomoyo/common.c
++++ b/security/tomoyo/common.c
+@@ -2657,13 +2657,14 @@ ssize_t tomoyo_write_control(struct tomoyo_io_buffer 
*head,
+ {
+       int error = buffer_len;
+       size_t avail_len = buffer_len;
+-      char *cp0 = head->write_buf;
++      char *cp0;
+       int idx;
+ 
+       if (!head->write)
+               return -EINVAL;
+       if (mutex_lock_interruptible(&head->io_sem))
+               return -EINTR;
++      cp0 = head->write_buf;
+       head->read_user_buf_avail = 0;
+       idx = tomoyo_read_lock();
+       /* Read a line and dispatch it to the policy handler. */
+diff --git a/sound/core/Makefile b/sound/core/Makefile
+index d123587c0fd8f..bc04acf4a45ce 100644
+--- a/sound/core/Makefile
++++ b/sound/core/Makefile
+@@ -32,7 +32,6 @@ snd-pcm-dmaengine-objs := pcm_dmaengine.o
+ snd-rawmidi-objs  := rawmidi.o
+ snd-timer-objs    := timer.o
+ snd-hrtimer-objs  := hrtimer.o
+-snd-rtctimer-objs := rtctimer.o
+ snd-hwdep-objs    := hwdep.o
+ snd-seq-device-objs := seq_device.o
+ 


Reply via email to