commit:     5001fb691f5a0cb75ff7bfc439fdcbe1da7fef5c
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Thu Sep 15 10:30:56 2022 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Thu Sep 15 10:30:56 2022 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=5001fb69

Linux patch 5.10.143

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

 0000_README               |    4 +
 1142_linux-5.10.143.patch | 2685 +++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 2689 insertions(+)

diff --git a/0000_README b/0000_README
index 75caafbb..32d72e53 100644
--- a/0000_README
+++ b/0000_README
@@ -611,6 +611,10 @@ Patch:  1141_linux-5.10.142.patch
 From:   http://www.kernel.org
 Desc:   Linux 5.10.142
 
+Patch:  1142_linux-5.10.143.patch
+From:   http://www.kernel.org
+Desc:   Linux 5.10.143
+
 Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.

diff --git a/1142_linux-5.10.143.patch b/1142_linux-5.10.143.patch
new file mode 100644
index 00000000..28c57e76
--- /dev/null
+++ b/1142_linux-5.10.143.patch
@@ -0,0 +1,2685 @@
+diff --git a/Documentation/arm64/silicon-errata.rst b/Documentation/arm64/silicon-errata.rst
+index f01eed0ee23ad..22a07c208fee0 100644
+--- a/Documentation/arm64/silicon-errata.rst
++++ b/Documentation/arm64/silicon-errata.rst
+@@ -92,6 +92,8 @@ stable kernels.
+ +----------------+-----------------+-----------------+-----------------------------+
+ | ARM            | Cortex-A77      | #1508412        | ARM64_ERRATUM_1508412       |
+ +----------------+-----------------+-----------------+-----------------------------+
++| ARM            | Cortex-A510     | #2457168        | ARM64_ERRATUM_2457168       |
+++----------------+-----------------+-----------------+-----------------------------+
+ | ARM            | Neoverse-N1     | #1188873,1418040| ARM64_ERRATUM_1418040       |
+ +----------------+-----------------+-----------------+-----------------------------+
+ | ARM            | Neoverse-N1     | #1349291        | N/A                         |
+diff --git a/Makefile b/Makefile
+index 655fe095459b3..60b2018c26dba 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 5
+ PATCHLEVEL = 10
+-SUBLEVEL = 142
++SUBLEVEL = 143
+ EXTRAVERSION =
+ NAME = Dare mighty things
+ 
+diff --git a/arch/arm/boot/dts/at91-sama5d27_wlsom1.dtsi b/arch/arm/boot/dts/at91-sama5d27_wlsom1.dtsi
+index a06700e53e4c3..9c8b3eb49ea30 100644
+--- a/arch/arm/boot/dts/at91-sama5d27_wlsom1.dtsi
++++ b/arch/arm/boot/dts/at91-sama5d27_wlsom1.dtsi
+@@ -62,8 +62,8 @@
+               regulators {
+                       vdd_3v3: VDD_IO {
+                               regulator-name = "VDD_IO";
+-                              regulator-min-microvolt = <1200000>;
+-                              regulator-max-microvolt = <3700000>;
++                              regulator-min-microvolt = <3300000>;
++                              regulator-max-microvolt = <3300000>;
+                               regulator-initial-mode = <2>;
+                               regulator-allowed-modes = <2>, <4>;
+                               regulator-always-on;
+@@ -81,8 +81,8 @@
+ 
+                       vddio_ddr: VDD_DDR {
+                               regulator-name = "VDD_DDR";
+-                              regulator-min-microvolt = <600000>;
+-                              regulator-max-microvolt = <1850000>;
++                              regulator-min-microvolt = <1200000>;
++                              regulator-max-microvolt = <1200000>;
+                               regulator-initial-mode = <2>;
+                               regulator-allowed-modes = <2>, <4>;
+                               regulator-always-on;
+@@ -104,8 +104,8 @@
+ 
+                       vdd_core: VDD_CORE {
+                               regulator-name = "VDD_CORE";
+-                              regulator-min-microvolt = <600000>;
+-                              regulator-max-microvolt = <1850000>;
++                              regulator-min-microvolt = <1250000>;
++                              regulator-max-microvolt = <1250000>;
+                               regulator-initial-mode = <2>;
+                               regulator-allowed-modes = <2>, <4>;
+                               regulator-always-on;
+@@ -146,8 +146,8 @@
+ 
+                       LDO1 {
+                               regulator-name = "LDO1";
+-                              regulator-min-microvolt = <1200000>;
+-                              regulator-max-microvolt = <3700000>;
++                              regulator-min-microvolt = <3300000>;
++                              regulator-max-microvolt = <3300000>;
+                               regulator-always-on;
+ 
+                               regulator-state-standby {
+@@ -161,9 +161,8 @@
+ 
+                       LDO2 {
+                               regulator-name = "LDO2";
+-                              regulator-min-microvolt = <1200000>;
+-                              regulator-max-microvolt = <3700000>;
+-                              regulator-always-on;
++                              regulator-min-microvolt = <1800000>;
++                              regulator-max-microvolt = <3300000>;
+ 
+                               regulator-state-standby {
+                                       regulator-on-in-suspend;
+diff --git a/arch/arm/boot/dts/at91-sama5d2_icp.dts b/arch/arm/boot/dts/at91-sama5d2_icp.dts
+index 634411d13b4aa..00b9e88ff5451 100644
+--- a/arch/arm/boot/dts/at91-sama5d2_icp.dts
++++ b/arch/arm/boot/dts/at91-sama5d2_icp.dts
+@@ -195,8 +195,8 @@
+                       regulators {
+                               vdd_io_reg: VDD_IO {
+                                       regulator-name = "VDD_IO";
+-                                      regulator-min-microvolt = <1200000>;
+-                                      regulator-max-microvolt = <3700000>;
++                                      regulator-min-microvolt = <3300000>;
++                                      regulator-max-microvolt = <3300000>;
+                                       regulator-initial-mode = <2>;
+                                       regulator-allowed-modes = <2>, <4>;
+                                       regulator-always-on;
+@@ -214,8 +214,8 @@
+ 
+                               VDD_DDR {
+                                       regulator-name = "VDD_DDR";
+-                                      regulator-min-microvolt = <600000>;
+-                                      regulator-max-microvolt = <1850000>;
++                                      regulator-min-microvolt = <1350000>;
++                                      regulator-max-microvolt = <1350000>;
+                                       regulator-initial-mode = <2>;
+                                       regulator-allowed-modes = <2>, <4>;
+                                       regulator-always-on;
+@@ -233,8 +233,8 @@
+ 
+                               VDD_CORE {
+                                       regulator-name = "VDD_CORE";
+-                                      regulator-min-microvolt = <600000>;
+-                                      regulator-max-microvolt = <1850000>;
++                                      regulator-min-microvolt = <1250000>;
++                                      regulator-max-microvolt = <1250000>;
+                                       regulator-initial-mode = <2>;
+                                       regulator-allowed-modes = <2>, <4>;
+                                       regulator-always-on;
+@@ -256,7 +256,6 @@
+                                       regulator-max-microvolt = <1850000>;
+                                       regulator-initial-mode = <2>;
+                                       regulator-allowed-modes = <2>, <4>;
+-                                      regulator-always-on;
+ 
+                                       regulator-state-standby {
+                                               regulator-on-in-suspend;
+@@ -271,8 +270,8 @@
+ 
+                               LDO1 {
+                                       regulator-name = "LDO1";
+-                                      regulator-min-microvolt = <1200000>;
+-                                      regulator-max-microvolt = <3700000>;
++                                      regulator-min-microvolt = <2500000>;
++                                      regulator-max-microvolt = <2500000>;
+                                       regulator-always-on;
+ 
+                                       regulator-state-standby {
+@@ -286,8 +285,8 @@
+ 
+                               LDO2 {
+                                       regulator-name = "LDO2";
+-                                      regulator-min-microvolt = <1200000>;
+-                                      regulator-max-microvolt = <3700000>;
++                                      regulator-min-microvolt = <3300000>;
++                                      regulator-max-microvolt = <3300000>;
+                                       regulator-always-on;
+ 
+                                       regulator-state-standby {
+diff --git a/arch/arm/boot/dts/imx6qdl-kontron-samx6i.dtsi b/arch/arm/boot/dts/imx6qdl-kontron-samx6i.dtsi
+index 92f9977d14822..e9a4115124eb0 100644
+--- a/arch/arm/boot/dts/imx6qdl-kontron-samx6i.dtsi
++++ b/arch/arm/boot/dts/imx6qdl-kontron-samx6i.dtsi
+@@ -51,16 +51,6 @@
+               vin-supply = <&reg_3p3v_s5>;
+       };
+ 
+-      reg_3p3v_s0: regulator-3p3v-s0 {
+-              compatible = "regulator-fixed";
+-              regulator-name = "V_3V3_S0";
+-              regulator-min-microvolt = <3300000>;
+-              regulator-max-microvolt = <3300000>;
+-              regulator-always-on;
+-              regulator-boot-on;
+-              vin-supply = <&reg_3p3v_s5>;
+-      };
+-
+       reg_3p3v_s5: regulator-3p3v-s5 {
+               compatible = "regulator-fixed";
+               regulator-name = "V_3V3_S5";
+diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
+index 7c7906e9dafda..1116a8d092c01 100644
+--- a/arch/arm64/Kconfig
++++ b/arch/arm64/Kconfig
+@@ -657,6 +657,24 @@ config ARM64_ERRATUM_1508412
+ 
+         If unsure, say Y.
+ 
++config ARM64_ERRATUM_2457168
++      bool "Cortex-A510: 2457168: workaround for AMEVCNTR01 incrementing incorrectly"
++      depends on ARM64_AMU_EXTN
++      default y
++      help
++        This option adds the workaround for ARM Cortex-A510 erratum 2457168.
++
++        The AMU counter AMEVCNTR01 (constant counter) should increment at the same rate
++        as the system counter. On affected Cortex-A510 cores AMEVCNTR01 increments
++        incorrectly giving a significantly higher output value.
++
++        Work around this problem by keeping the reference values of affected counters
++        to 0 thus signaling an error case. This effect is the same to firmware disabling
++        affected counters, in which case 0 will be returned when reading the disabled
++        counters.
++
++        If unsure, say Y.
++
+ config CAVIUM_ERRATUM_22375
+       bool "Cavium erratum 22375, 24313"
+       default y
+diff --git a/arch/arm64/include/asm/cpucaps.h b/arch/arm64/include/asm/cpucaps.h
+index f42fd0a2e81c8..53030d3c03a2c 100644
+--- a/arch/arm64/include/asm/cpucaps.h
++++ b/arch/arm64/include/asm/cpucaps.h
+@@ -67,7 +67,8 @@
+ #define ARM64_MTE                             57
+ #define ARM64_WORKAROUND_1508412              58
+ #define ARM64_SPECTRE_BHB                     59
++#define ARM64_WORKAROUND_2457168              60
+ 
+-#define ARM64_NCAPS                           60
++#define ARM64_NCAPS                           61
+ 
+ #endif /* __ASM_CPUCAPS_H */
+diff --git a/arch/arm64/kernel/cacheinfo.c b/arch/arm64/kernel/cacheinfo.c
+index 587543c6c51cb..97c42be71338a 100644
+--- a/arch/arm64/kernel/cacheinfo.c
++++ b/arch/arm64/kernel/cacheinfo.c
+@@ -45,7 +45,8 @@ static void ci_leaf_init(struct cacheinfo *this_leaf,
+ 
+ int init_cache_level(unsigned int cpu)
+ {
+-      unsigned int ctype, level, leaves, fw_level;
++      unsigned int ctype, level, leaves;
++      int fw_level;
+       struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
+ 
+       for (level = 1, leaves = 0; level <= MAX_CACHE_LEVEL; level++) {
+@@ -63,6 +64,9 @@ int init_cache_level(unsigned int cpu)
+       else
+               fw_level = acpi_find_last_cache_level(cpu);
+ 
++      if (fw_level < 0)
++              return fw_level;
++
+       if (level < fw_level) {
+               /*
+                * some external caches not specified in CLIDR_EL1
+diff --git a/arch/arm64/kernel/cpu_errata.c b/arch/arm64/kernel/cpu_errata.c
+index 78263dadd00da..aaacca6fd52f6 100644
+--- a/arch/arm64/kernel/cpu_errata.c
++++ b/arch/arm64/kernel/cpu_errata.c
+@@ -545,6 +545,15 @@ const struct arm64_cpu_capabilities arm64_errata[] = {
+                                 0, 0,
+                                 1, 0),
+       },
++#endif
++#ifdef CONFIG_ARM64_ERRATUM_2457168
++      {
++              .desc = "ARM erratum 2457168",
++              .capability = ARM64_WORKAROUND_2457168,
++              .type = ARM64_CPUCAP_WEAK_LOCAL_CPU_FEATURE,
++              /* Cortex-A510 r0p0-r1p1 */
++              CAP_MIDR_RANGE(MIDR_CORTEX_A510, 0, 0, 1, 1)
++      },
+ #endif
+       {
+       }
+diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c
+index 4087e2d1f39e2..e72c90b826568 100644
+--- a/arch/arm64/kernel/cpufeature.c
++++ b/arch/arm64/kernel/cpufeature.c
+@@ -1559,7 +1559,10 @@ static void cpu_amu_enable(struct arm64_cpu_capabilities const *cap)
+               pr_info("detected CPU%d: Activity Monitors Unit (AMU)\n",
+                       smp_processor_id());
+               cpumask_set_cpu(smp_processor_id(), &amu_cpus);
+-              init_cpu_freq_invariance_counters();
++
++              /* 0 reference values signal broken/disabled counters */
++              if (!this_cpu_has_cap(ARM64_WORKAROUND_2457168))
++                      init_cpu_freq_invariance_counters();
+       }
+ }
+ 
+diff --git a/arch/mips/loongson32/ls1c/board.c b/arch/mips/loongson32/ls1c/board.c
+index e9de6da0ce51f..9dcfe9de55b0a 100644
+--- a/arch/mips/loongson32/ls1c/board.c
++++ b/arch/mips/loongson32/ls1c/board.c
+@@ -15,7 +15,6 @@ static struct platform_device *ls1c_platform_devices[] __initdata = {
+ static int __init ls1c_platform_init(void)
+ {
+       ls1x_serial_set_uartclk(&ls1x_uart_pdev);
+-      ls1x_rtc_set_extclk(&ls1x_rtc_pdev);
+ 
+       return platform_add_devices(ls1c_platform_devices,
+                                  ARRAY_SIZE(ls1c_platform_devices));
+diff --git a/arch/parisc/kernel/head.S b/arch/parisc/kernel/head.S
+index aa93d775c34db..598d0938449da 100644
+--- a/arch/parisc/kernel/head.S
++++ b/arch/parisc/kernel/head.S
+@@ -22,7 +22,7 @@
+ #include <linux/init.h>
+ #include <linux/pgtable.h>
+ 
+-      .level  PA_ASM_LEVEL
++      .level  1.1
+ 
+       __INITDATA
+ ENTRY(boot_args)
+@@ -69,6 +69,47 @@ $bss_loop:
+       stw,ma          %arg2,4(%r1)
+       stw,ma          %arg3,4(%r1)
+ 
++#if !defined(CONFIG_64BIT) && defined(CONFIG_PA20)
++      /* This 32-bit kernel was compiled for PA2.0 CPUs. Check current CPU
++       * and halt kernel if we detect a PA1.x CPU. */
++      ldi             32,%r10
++      mtctl           %r10,%cr11
++      .level 2.0
++      mfctl,w         %cr11,%r10
++      .level 1.1
++      comib,<>,n      0,%r10,$cpu_ok
++
++      load32          PA(msg1),%arg0
++      ldi             msg1_end-msg1,%arg1
++$iodc_panic:
++      copy            %arg0, %r10
++      copy            %arg1, %r11
++      load32          PA(init_stack),%sp
++#define MEM_CONS 0x3A0
++      ldw             MEM_CONS+32(%r0),%arg0  // HPA
++      ldi             ENTRY_IO_COUT,%arg1
++      ldw             MEM_CONS+36(%r0),%arg2  // SPA
++      ldw             MEM_CONS+8(%r0),%arg3   // layers
++      load32          PA(__bss_start),%r1
++      stw             %r1,-52(%sp)            // arg4
++      stw             %r0,-56(%sp)            // arg5
++      stw             %r10,-60(%sp)           // arg6 = ptr to text
++      stw             %r11,-64(%sp)           // arg7 = len
++      stw             %r0,-68(%sp)            // arg8
++      load32          PA(.iodc_panic_ret), %rp
++      ldw             MEM_CONS+40(%r0),%r1    // ENTRY_IODC
++      bv,n            (%r1)
++.iodc_panic_ret:
++      b .                             /* wait endless with ... */
++      or              %r10,%r10,%r10  /* qemu idle sleep */
++msg1: .ascii "Can't boot kernel which was built for PA8x00 CPUs on this machine.\r\n"
++msg1_end:
++
++$cpu_ok:
++#endif
++
++      .level  PA_ASM_LEVEL
++
+       /* Initialize startup VM. Just map first 16/32 MB of memory */
+       load32          PA(swapper_pg_dir),%r4
+       mtctl           %r4,%cr24       /* Initialize kernel root pointer */
+diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c
+index 9d5460f6e0ff1..6f33d62331b1f 100644
+--- a/drivers/block/xen-blkfront.c
++++ b/drivers/block/xen-blkfront.c
+@@ -1852,6 +1852,12 @@ static void free_info(struct blkfront_info *info)
+       kfree(info);
+ }
+ 
++/* Enable the persistent grants feature. */
++static bool feature_persistent = true;
++module_param(feature_persistent, bool, 0644);
++MODULE_PARM_DESC(feature_persistent,
++              "Enables the persistent grants feature");
++
+ /* Common code used when first setting up, and when resuming. */
+ static int talk_to_blkback(struct xenbus_device *dev,
+                          struct blkfront_info *info)
+@@ -1943,6 +1949,7 @@ again:
+               message = "writing protocol";
+               goto abort_transaction;
+       }
++      info->feature_persistent_parm = feature_persistent;
+       err = xenbus_printf(xbt, dev->nodename, "feature-persistent", "%u",
+                       info->feature_persistent_parm);
+       if (err)
+@@ -2019,12 +2026,6 @@ static int negotiate_mq(struct blkfront_info *info)
+       return 0;
+ }
+ 
+-/* Enable the persistent grants feature. */
+-static bool feature_persistent = true;
+-module_param(feature_persistent, bool, 0644);
+-MODULE_PARM_DESC(feature_persistent,
+-              "Enables the persistent grants feature");
+-
+ /**
+  * Entry point to this code when a new device is created.  Allocate the basic
+  * structures and the ring buffer for communication with the backend, and
+@@ -2394,7 +2395,6 @@ static void blkfront_gather_backend_features(struct blkfront_info *info)
+       if (xenbus_read_unsigned(info->xbdev->otherend, "feature-discard", 0))
+               blkfront_setup_discard(info);
+ 
+-      info->feature_persistent_parm = feature_persistent;
+       if (info->feature_persistent_parm)
+               info->feature_persistent =
+                       !!xenbus_read_unsigned(info->xbdev->otherend,
+diff --git a/drivers/firmware/efi/capsule-loader.c b/drivers/firmware/efi/capsule-loader.c
+index 4dde8edd53b62..3e8d4b51a8140 100644
+--- a/drivers/firmware/efi/capsule-loader.c
++++ b/drivers/firmware/efi/capsule-loader.c
+@@ -242,29 +242,6 @@ failed:
+       return ret;
+ }
+ 
+-/**
+- * efi_capsule_flush - called by file close or file flush
+- * @file: file pointer
+- * @id: not used
+- *
+- *    If a capsule is being partially uploaded then calling this function
+- *    will be treated as upload termination and will free those completed
+- *    buffer pages and -ECANCELED will be returned.
+- **/
+-static int efi_capsule_flush(struct file *file, fl_owner_t id)
+-{
+-      int ret = 0;
+-      struct capsule_info *cap_info = file->private_data;
+-
+-      if (cap_info->index > 0) {
+-              pr_err("capsule upload not complete\n");
+-              efi_free_all_buff_pages(cap_info);
+-              ret = -ECANCELED;
+-      }
+-
+-      return ret;
+-}
+-
+ /**
+  * efi_capsule_release - called by file close
+  * @inode: not used
+@@ -277,6 +254,13 @@ static int efi_capsule_release(struct inode *inode, struct file *file)
+ {
+       struct capsule_info *cap_info = file->private_data;
+ 
++      if (cap_info->index > 0 &&
++          (cap_info->header.headersize == 0 ||
++           cap_info->count < cap_info->total_size)) {
++              pr_err("capsule upload not complete\n");
++              efi_free_all_buff_pages(cap_info);
++      }
++
+       kfree(cap_info->pages);
+       kfree(cap_info->phys);
+       kfree(file->private_data);
+@@ -324,7 +308,6 @@ static const struct file_operations efi_capsule_fops = {
+       .owner = THIS_MODULE,
+       .open = efi_capsule_open,
+       .write = efi_capsule_write,
+-      .flush = efi_capsule_flush,
+       .release = efi_capsule_release,
+       .llseek = no_llseek,
+ };
+diff --git a/drivers/firmware/efi/libstub/Makefile b/drivers/firmware/efi/libstub/Makefile
+index a2ae9c3b95793..433e11dab4a87 100644
+--- a/drivers/firmware/efi/libstub/Makefile
++++ b/drivers/firmware/efi/libstub/Makefile
+@@ -37,6 +37,13 @@ KBUILD_CFLAGS                       := $(cflags-y) -Os -DDISABLE_BRANCH_PROFILING \
+                                  $(call cc-option,-fno-addrsig) \
+                                  -D__DISABLE_EXPORTS
+ 
++#
++# struct randomization only makes sense for Linux internal types, which the EFI
++# stub code never touches, so let's turn off struct randomization for the stub
++# altogether
++#
++KBUILD_CFLAGS := $(filter-out $(RANDSTRUCT_CFLAGS), $(KBUILD_CFLAGS))
++
+ # remove SCS flags from all objects in this directory
+ KBUILD_CFLAGS := $(filter-out $(CC_FLAGS_SCS), $(KBUILD_CFLAGS))
+ 
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
+index 2f47f81a74a57..ae84d3b582aa5 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
+@@ -2146,6 +2146,9 @@ static int psp_hw_fini(void *handle)
+               psp_rap_terminate(psp);
+               psp_dtm_terminate(psp);
+               psp_hdcp_terminate(psp);
++
++              if (adev->gmc.xgmi.num_physical_nodes > 1)
++                      psp_xgmi_terminate(psp);
+       }
+ 
+       psp_asd_unload(psp);
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c
+index 042c85fc528bb..def0b7092438f 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c
+@@ -622,7 +622,7 @@ int amdgpu_xgmi_remove_device(struct amdgpu_device *adev)
+               amdgpu_put_xgmi_hive(hive);
+       }
+ 
+-      return psp_xgmi_terminate(&adev->psp);
++      return 0;
+ }
+ 
+ int amdgpu_xgmi_ras_late_init(struct amdgpu_device *adev)
+diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+index 405bb3efa2a96..38f4c7474487b 100644
+--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+@@ -2570,7 +2570,8 @@ static void gfx_v9_0_constants_init(struct amdgpu_device *adev)
+ 
+       gfx_v9_0_tiling_mode_table_init(adev);
+ 
+-      gfx_v9_0_setup_rb(adev);
++      if (adev->gfx.num_gfx_rings)
++              gfx_v9_0_setup_rb(adev);
+       gfx_v9_0_get_cu_info(adev, &adev->gfx.cu_info);
+       adev->gfx.config.db_debug2 = RREG32_SOC15(GC, 0, mmDB_DEBUG2);
+ 
+diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c
+index f84701c562bf2..97441f373531f 100644
+--- a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c
+@@ -178,6 +178,7 @@ static void mmhub_v1_0_init_cache_regs(struct amdgpu_device *adev)
+       tmp = REG_SET_FIELD(tmp, VM_L2_CNTL2, INVALIDATE_L2_CACHE, 1);
+       WREG32_SOC15(MMHUB, 0, mmVM_L2_CNTL2, tmp);
+ 
++      tmp = mmVM_L2_CNTL3_DEFAULT;
+       if (adev->gmc.translate_further) {
+               tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3, BANK_SELECT, 12);
+               tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3,
+diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c
+index 5979af230eda0..8b30e8d83fbcf 100644
+--- a/drivers/gpu/drm/drm_gem.c
++++ b/drivers/gpu/drm/drm_gem.c
+@@ -166,21 +166,6 @@ void drm_gem_private_object_init(struct drm_device *dev,
+ }
+ EXPORT_SYMBOL(drm_gem_private_object_init);
+ 
+-static void
+-drm_gem_remove_prime_handles(struct drm_gem_object *obj, struct drm_file *filp)
+-{
+-      /*
+-       * Note: obj->dma_buf can't disappear as long as we still hold a
+-       * handle reference in obj->handle_count.
+-       */
+-      mutex_lock(&filp->prime.lock);
+-      if (obj->dma_buf) {
+-              drm_prime_remove_buf_handle_locked(&filp->prime,
+-                                                 obj->dma_buf);
+-      }
+-      mutex_unlock(&filp->prime.lock);
+-}
+-
+ /**
+  * drm_gem_object_handle_free - release resources bound to userspace handles
+  * @obj: GEM object to clean up.
+@@ -254,7 +239,7 @@ drm_gem_object_release_handle(int id, void *ptr, void *data)
+       else if (dev->driver->gem_close_object)
+               dev->driver->gem_close_object(obj, file_priv);
+ 
+-      drm_gem_remove_prime_handles(obj, file_priv);
++      drm_prime_remove_buf_handle(&file_priv->prime, id);
+       drm_vma_node_revoke(&obj->vma_node, file_priv);
+ 
+       drm_gem_object_handle_put_unlocked(obj);
+diff --git a/drivers/gpu/drm/drm_internal.h b/drivers/gpu/drm/drm_internal.h
+index b65865c630b0a..f80e0f28087d1 100644
+--- a/drivers/gpu/drm/drm_internal.h
++++ b/drivers/gpu/drm/drm_internal.h
+@@ -86,8 +86,8 @@ int drm_prime_fd_to_handle_ioctl(struct drm_device *dev, void *data,
+ 
+ void drm_prime_init_file_private(struct drm_prime_file_private *prime_fpriv);
+ void drm_prime_destroy_file_private(struct drm_prime_file_private *prime_fpriv);
+-void drm_prime_remove_buf_handle_locked(struct drm_prime_file_private *prime_fpriv,
+-                                      struct dma_buf *dma_buf);
++void drm_prime_remove_buf_handle(struct drm_prime_file_private *prime_fpriv,
++                               uint32_t handle);
+ 
+ /* drm_drv.c */
+ struct drm_minor *drm_minor_acquire(unsigned int minor_id);
+diff --git a/drivers/gpu/drm/drm_prime.c b/drivers/gpu/drm/drm_prime.c
+index 9f955f2010c25..825499ea3ff59 100644
+--- a/drivers/gpu/drm/drm_prime.c
++++ b/drivers/gpu/drm/drm_prime.c
+@@ -187,29 +187,33 @@ static int drm_prime_lookup_buf_handle(struct drm_prime_file_private *prime_fpri
+       return -ENOENT;
+ }
+ 
+-void drm_prime_remove_buf_handle_locked(struct drm_prime_file_private *prime_fpriv,
+-                                      struct dma_buf *dma_buf)
++void drm_prime_remove_buf_handle(struct drm_prime_file_private *prime_fpriv,
++                               uint32_t handle)
+ {
+       struct rb_node *rb;
+ 
+-      rb = prime_fpriv->dmabufs.rb_node;
++      mutex_lock(&prime_fpriv->lock);
++
++      rb = prime_fpriv->handles.rb_node;
+       while (rb) {
+               struct drm_prime_member *member;
+ 
+-              member = rb_entry(rb, struct drm_prime_member, dmabuf_rb);
+-              if (member->dma_buf == dma_buf) {
++              member = rb_entry(rb, struct drm_prime_member, handle_rb);
++              if (member->handle == handle) {
+                       rb_erase(&member->handle_rb, &prime_fpriv->handles);
+                       rb_erase(&member->dmabuf_rb, &prime_fpriv->dmabufs);
+ 
+-                      dma_buf_put(dma_buf);
++                      dma_buf_put(member->dma_buf);
+                       kfree(member);
+-                      return;
+-              } else if (member->dma_buf < dma_buf) {
++                      break;
++              } else if (member->handle < handle) {
+                       rb = rb->rb_right;
+               } else {
+                       rb = rb->rb_left;
+               }
+       }
++
++      mutex_unlock(&prime_fpriv->lock);
+ }
+ 
+ void drm_prime_init_file_private(struct drm_prime_file_private *prime_fpriv)
+diff --git a/drivers/gpu/drm/i915/display/intel_dp_link_training.c b/drivers/gpu/drm/i915/display/intel_dp_link_training.c
+index f2c8b56be9ead..261a5e97a0b4a 100644
+--- a/drivers/gpu/drm/i915/display/intel_dp_link_training.c
++++ b/drivers/gpu/drm/i915/display/intel_dp_link_training.c
+@@ -163,6 +163,28 @@ intel_dp_link_training_clock_recovery(struct intel_dp *intel_dp)
+       intel_dp_compute_rate(intel_dp, intel_dp->link_rate,
+                             &link_bw, &rate_select);
+ 
++      /*
++       * WaEdpLinkRateDataReload
++       *
++       * Parade PS8461E MUX (used on varius TGL+ laptops) needs
++       * to snoop the link rates reported by the sink when we
++       * use LINK_RATE_SET in order to operate in jitter cleaning
++       * mode (as opposed to redriver mode). Unfortunately it
++       * loses track of the snooped link rates when powered down,
++       * so we need to make it re-snoop often. Without this high
++       * link rates are not stable.
++       */
++      if (!link_bw) {
++              struct intel_connector *connector = intel_dp->attached_connector;
++              __le16 sink_rates[DP_MAX_SUPPORTED_RATES];
++
++              drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s] Reloading eDP link rates\n",
++                          connector->base.base.id, connector->base.name);
++
++              drm_dp_dpcd_read(&intel_dp->aux, DP_SUPPORTED_LINK_RATES,
++                               sink_rates, sizeof(sink_rates));
++      }
++
+       if (link_bw)
+               drm_dbg_kms(&i915->drm,
+                           "Using LINK_BW_SET value %02x\n", link_bw);
+diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
+index 266e3cbbd09bd..8287410f471fb 100644
+--- a/drivers/gpu/drm/radeon/radeon_device.c
++++ b/drivers/gpu/drm/radeon/radeon_device.c
+@@ -1623,6 +1623,9 @@ int radeon_suspend_kms(struct drm_device *dev, bool suspend,
+               if (r) {
+                       /* delay GPU reset to resume */
+                       radeon_fence_driver_force_completion(rdev, i);
++              } else {
++                      /* finish executing delayed work */
++                      flush_delayed_work(&rdev->fence_drv[i].lockup_work);
+               }
+       }
+ 
+diff --git a/drivers/hwmon/mr75203.c b/drivers/hwmon/mr75203.c
+index 046523d47c29b..41e3d3b54baff 100644
+--- a/drivers/hwmon/mr75203.c
++++ b/drivers/hwmon/mr75203.c
+@@ -68,8 +68,9 @@
+ 
+ /* VM Individual Macro Register */
+ #define VM_COM_REG_SIZE       0x200
+-#define VM_SDIF_DONE(n)       (VM_COM_REG_SIZE + 0x34 + 0x200 * (n))
+-#define VM_SDIF_DATA(n)       (VM_COM_REG_SIZE + 0x40 + 0x200 * (n))
++#define VM_SDIF_DONE(vm)      (VM_COM_REG_SIZE + 0x34 + 0x200 * (vm))
++#define VM_SDIF_DATA(vm, ch)  \
++      (VM_COM_REG_SIZE + 0x40 + 0x200 * (vm) + 0x4 * (ch))
+ 
+ /* SDA Slave Register */
+ #define IP_CTRL                       0x00
+@@ -115,6 +116,7 @@ struct pvt_device {
+       u32                     t_num;
+       u32                     p_num;
+       u32                     v_num;
++      u32                     c_num;
+       u32                     ip_freq;
+       u8                      *vm_idx;
+ };
+@@ -178,14 +180,15 @@ static int pvt_read_in(struct device *dev, u32 attr, int channel, long *val)
+ {
+       struct pvt_device *pvt = dev_get_drvdata(dev);
+       struct regmap *v_map = pvt->v_map;
++      u8 vm_idx, ch_idx;
+       u32 n, stat;
+-      u8 vm_idx;
+       int ret;
+ 
+-      if (channel >= pvt->v_num)
++      if (channel >= pvt->v_num * pvt->c_num)
+               return -EINVAL;
+ 
+-      vm_idx = pvt->vm_idx[channel];
++      vm_idx = pvt->vm_idx[channel / pvt->c_num];
++      ch_idx = channel % pvt->c_num;
+ 
+       switch (attr) {
+       case hwmon_in_input:
+@@ -196,13 +199,23 @@ static int pvt_read_in(struct device *dev, u32 attr, int channel, long *val)
+               if (ret)
+                       return ret;
+ 
+-              ret = regmap_read(v_map, VM_SDIF_DATA(vm_idx), &n);
++              ret = regmap_read(v_map, VM_SDIF_DATA(vm_idx, ch_idx), &n);
+               if(ret < 0)
+                       return ret;
+ 
+               n &= SAMPLE_DATA_MSK;
+-              /* Convert the N bitstream count into voltage */
+-              *val = (PVT_N_CONST * n - PVT_R_CONST) >> PVT_CONV_BITS;
++              /*
++               * Convert the N bitstream count into voltage.
++               * To support negative voltage calculation for 64bit machines
++               * n must be cast to long, since n and *val differ both in
++               * signedness and in size.
++               * Division is used instead of right shift, because for signed
++               * numbers, the sign bit is used to fill the vacated bit
++               * positions, and if the number is negative, 1 is used.
++               * BIT(x) may not be used instead of (1 << x) because it's
++               * unsigned.
++               */
++              *val = (PVT_N_CONST * (long)n - PVT_R_CONST) / (1 << PVT_CONV_BITS);
+ 
+               return 0;
+       default:
+@@ -385,6 +398,19 @@ static int pvt_init(struct pvt_device *pvt)
+               if (ret)
+                       return ret;
+ 
++              val = (BIT(pvt->c_num) - 1) | VM_CH_INIT |
++                    IP_POLL << SDIF_ADDR_SFT | SDIF_WRN_W | SDIF_PROG;
++              ret = regmap_write(v_map, SDIF_W, val);
++              if (ret < 0)
++                      return ret;
++
++              ret = regmap_read_poll_timeout(v_map, SDIF_STAT,
++                                             val, !(val & SDIF_BUSY),
++                                             PVT_POLL_DELAY_US,
++                                             PVT_POLL_TIMEOUT_US);
++              if (ret)
++                      return ret;
++
+               val = CFG1_VOL_MEAS_MODE | CFG1_PARALLEL_OUT |
+                     CFG1_14_BIT | IP_CFG << SDIF_ADDR_SFT |
+                     SDIF_WRN_W | SDIF_PROG;
+@@ -499,8 +525,8 @@ static int pvt_reset_control_deassert(struct device *dev, struct pvt_device *pvt
+ 
+ static int mr75203_probe(struct platform_device *pdev)
+ {
++      u32 ts_num, vm_num, pd_num, ch_num, val, index, i;
+       const struct hwmon_channel_info **pvt_info;
+-      u32 ts_num, vm_num, pd_num, val, index, i;
+       struct device *dev = &pdev->dev;
+       u32 *temp_config, *in_config;
+       struct device *hwmon_dev;
+@@ -541,9 +567,11 @@ static int mr75203_probe(struct platform_device *pdev)
+       ts_num = (val & TS_NUM_MSK) >> TS_NUM_SFT;
+       pd_num = (val & PD_NUM_MSK) >> PD_NUM_SFT;
+       vm_num = (val & VM_NUM_MSK) >> VM_NUM_SFT;
++      ch_num = (val & CH_NUM_MSK) >> CH_NUM_SFT;
+       pvt->t_num = ts_num;
+       pvt->p_num = pd_num;
+       pvt->v_num = vm_num;
++      pvt->c_num = ch_num;
+       val = 0;
+       if (ts_num)
+               val++;
+@@ -580,7 +608,7 @@ static int mr75203_probe(struct platform_device *pdev)
+       }
+ 
+       if (vm_num) {
+-              u32 num = vm_num;
++              u32 total_ch;
+ 
+               ret = pvt_get_regmap(pdev, "vm", pvt);
+               if (ret)
+@@ -594,30 +622,30 @@ static int mr75203_probe(struct platform_device *pdev)
+               ret = device_property_read_u8_array(dev, "intel,vm-map",
+                                                   pvt->vm_idx, vm_num);
+               if (ret) {
+-                      num = 0;
++                      /*
++                       * Incase intel,vm-map property is not defined, we
++                       * assume incremental channel numbers.
++                       */
++                      for (i = 0; i < vm_num; i++)
++                              pvt->vm_idx[i] = i;
+               } else {
+                       for (i = 0; i < vm_num; i++)
+                               if (pvt->vm_idx[i] >= vm_num ||
+                                   pvt->vm_idx[i] == 0xff) {
+-                                      num = i;
++                                      pvt->v_num = i;
++                                      vm_num = i;
+                                       break;
+                               }
+               }
+ 
+-              /*
+-               * Incase intel,vm-map property is not defined, we assume
+-               * incremental channel numbers.
+-               */
+-              for (i = num; i < vm_num; i++)
+-                      pvt->vm_idx[i] = i;
+-
+-              in_config = devm_kcalloc(dev, num + 1,
++              total_ch = ch_num * vm_num;
++              in_config = devm_kcalloc(dev, total_ch + 1,
+                                        sizeof(*in_config), GFP_KERNEL);
+               if (!in_config)
+                       return -ENOMEM;
+ 
+-              memset32(in_config, HWMON_I_INPUT, num);
+-              in_config[num] = 0;
++              memset32(in_config, HWMON_I_INPUT, total_ch);
++              in_config[total_ch] = 0;
+               pvt_in.config = in_config;
+ 
+               pvt_info[index++] = &pvt_in;
+diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
+index 3c40aa50cd60c..b5fa19a033c0a 100644
+--- a/drivers/infiniband/core/cma.c
++++ b/drivers/infiniband/core/cma.c
+@@ -1722,8 +1722,8 @@ cma_ib_id_from_event(struct ib_cm_id *cm_id,
+               }
+ 
+               if (!validate_net_dev(*net_dev,
+-                               (struct sockaddr *)&req->listen_addr_storage,
+-                               (struct sockaddr *)&req->src_addr_storage)) {
++                               (struct sockaddr *)&req->src_addr_storage,
++                               (struct sockaddr *)&req->listen_addr_storage)) {
+                       id_priv = ERR_PTR(-EHOSTUNREACH);
+                       goto err;
+               }
+diff --git a/drivers/infiniband/core/umem_odp.c b/drivers/infiniband/core/umem_odp.c
+index 323f6cf006824..af4af4789ef27 100644
+--- a/drivers/infiniband/core/umem_odp.c
++++ b/drivers/infiniband/core/umem_odp.c
+@@ -466,7 +466,7 @@ retry:
+               mutex_unlock(&umem_odp->umem_mutex);
+ 
+ out_put_mm:
+-      mmput(owning_mm);
++      mmput_async(owning_mm);
+ out_put_task:
+       if (owning_process)
+               put_task_struct(owning_process);
+diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.h b/drivers/infiniband/hw/hns/hns_roce_hw_v2.h
+index be7f2fe1e8839..8a92faeb3d237 100644
+--- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.h
++++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.h
+@@ -92,7 +92,7 @@
+ 
+ #define HNS_ROCE_V2_QPC_TIMER_ENTRY_SZ                PAGE_SIZE
+ #define HNS_ROCE_V2_CQC_TIMER_ENTRY_SZ                PAGE_SIZE
+-#define HNS_ROCE_V2_PAGE_SIZE_SUPPORTED               0xFFFFF000
++#define HNS_ROCE_V2_PAGE_SIZE_SUPPORTED               0xFFFF000
+ #define HNS_ROCE_V2_MAX_INNER_MTPT_NUM                2
+ #define HNS_ROCE_INVALID_LKEY                 0x100
+ #define HNS_ROCE_CMQ_TX_TIMEOUT                       30000
+diff --git a/drivers/infiniband/hw/hns/hns_roce_qp.c b/drivers/infiniband/hw/hns/hns_roce_qp.c
+index 291e06d631505..6fe98af7741b5 100644
+--- a/drivers/infiniband/hw/hns/hns_roce_qp.c
++++ b/drivers/infiniband/hw/hns/hns_roce_qp.c
+@@ -386,11 +386,8 @@ static int set_rq_size(struct hns_roce_dev *hr_dev, struct ib_qp_cap *cap,
+ 
+       hr_qp->rq.max_gs = roundup_pow_of_two(max(1U, cap->max_recv_sge));
+ 
+-      if (hr_dev->caps.max_rq_sg <= HNS_ROCE_SGE_IN_WQE)
+-              hr_qp->rq.wqe_shift = ilog2(hr_dev->caps.max_rq_desc_sz);
+-      else
+-              hr_qp->rq.wqe_shift = ilog2(hr_dev->caps.max_rq_desc_sz *
+-                                          hr_qp->rq.max_gs);
++      hr_qp->rq.wqe_shift = ilog2(hr_dev->caps.max_rq_desc_sz *
++                                  hr_qp->rq.max_gs);
+ 
+       hr_qp->rq.wqe_cnt = cnt;
+       if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RQ_INLINE)
+diff --git a/drivers/infiniband/hw/mlx5/mad.c b/drivers/infiniband/hw/mlx5/mad.c
+index 9bb9bb058932f..cca7a4a6bd82d 100644
+--- a/drivers/infiniband/hw/mlx5/mad.c
++++ b/drivers/infiniband/hw/mlx5/mad.c
+@@ -166,6 +166,12 @@ static int process_pma_cmd(struct mlx5_ib_dev *dev, u8 port_num,
+               mdev = dev->mdev;
+               mdev_port_num = 1;
+       }
++      if (MLX5_CAP_GEN(dev->mdev, num_ports) == 1) {
++              /* set local port to one for Function-Per-Port HCA. */
++              mdev = dev->mdev;
++              mdev_port_num = 1;
++      }
++
+       /* Declaring support of extended counters */
+       if (in_mad->mad_hdr.attr_id == IB_PMA_CLASS_PORT_INFO) {
+               struct ib_class_port_info cpi = {};
+diff --git a/drivers/infiniband/sw/siw/siw_qp_tx.c b/drivers/infiniband/sw/siw/siw_qp_tx.c
+index 7989c4043db4e..3c3ae5ef29428 100644
+--- a/drivers/infiniband/sw/siw/siw_qp_tx.c
++++ b/drivers/infiniband/sw/siw/siw_qp_tx.c
+@@ -29,7 +29,7 @@ static struct page *siw_get_pblpage(struct siw_mem *mem, u64 addr, int *idx)
+       dma_addr_t paddr = siw_pbl_get_buffer(pbl, offset, NULL, idx);
+ 
+       if (paddr)
+-              return virt_to_page(paddr);
++              return virt_to_page((void *)paddr);
+ 
+       return NULL;
+ }
+@@ -523,13 +523,23 @@ static int siw_tx_hdt(struct siw_iwarp_tx *c_tx, struct socket *s)
+                                       kunmap(p);
+                               }
+                       } else {
+-                              u64 va = sge->laddr + sge_off;
++                              /*
++                               * Cast to an uintptr_t to preserve all 64 bits
++                               * in sge->laddr.
++                               */
++                              uintptr_t va = (uintptr_t)(sge->laddr + sge_off);
+ 
+-                              page_array[seg] = virt_to_page(va & PAGE_MASK);
++                              /*
++                               * virt_to_page() takes a (void *) pointer
++                               * so cast to a (void *) meaning it will be 64
++                               * bits on a 64 bit platform and 32 bits on a
++                               * 32 bit platform.
++                               */
++                              page_array[seg] = virt_to_page((void *)(va & PAGE_MASK));
+                               if (do_crc)
+                                       crypto_shash_update(
+                                               c_tx->mpa_crc_hd,
+-                                              (void *)(uintptr_t)va,
++                                              (void *)va,
+                                               plen);
+                       }
+ 
+diff --git a/drivers/iommu/amd/iommu.c b/drivers/iommu/amd/iommu.c
+index 200cf5da5e0ad..f216a86d9c817 100644
+--- a/drivers/iommu/amd/iommu.c
++++ b/drivers/iommu/amd/iommu.c
+@@ -923,7 +923,8 @@ static void build_completion_wait(struct iommu_cmd *cmd,
+       memset(cmd, 0, sizeof(*cmd));
+       cmd->data[0] = lower_32_bits(paddr) | CMD_COMPL_WAIT_STORE_MASK;
+       cmd->data[1] = upper_32_bits(paddr);
+-      cmd->data[2] = data;
++      cmd->data[2] = lower_32_bits(data);
++      cmd->data[3] = upper_32_bits(data);
+       CMD_SET_TYPE(cmd, CMD_COMPL_WAIT);
+ }
+ 
+diff --git a/drivers/net/ethernet/intel/i40e/i40e_client.c b/drivers/net/ethernet/intel/i40e/i40e_client.c
+index 32f3facbed1a5..b3cb5d1033260 100644
+--- a/drivers/net/ethernet/intel/i40e/i40e_client.c
++++ b/drivers/net/ethernet/intel/i40e/i40e_client.c
+@@ -178,6 +178,10 @@ void i40e_notify_client_of_netdev_close(struct i40e_vsi *vsi, bool reset)
+                       "Cannot locate client instance close routine\n");
+               return;
+       }
++      if (!test_bit(__I40E_CLIENT_INSTANCE_OPENED, &cdev->state)) {
++              dev_dbg(&pf->pdev->dev, "Client is not open, abort close\n");
++              return;
++      }
+       cdev->client->ops->close(&cdev->lan_info, cdev->client, reset);
+       clear_bit(__I40E_CLIENT_INSTANCE_OPENED, &cdev->state);
+       i40e_client_release_qvlist(&cdev->lan_info);
+@@ -374,7 +378,6 @@ void i40e_client_subtask(struct i40e_pf *pf)
+                               /* Remove failed client instance */
+                               clear_bit(__I40E_CLIENT_INSTANCE_OPENED,
+                                         &cdev->state);
+-                              i40e_client_del_instance(pf);
+                               return;
+                       }
+               }
+diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c
+index 810f2bdb91645..f193709c8efc6 100644
+--- a/drivers/net/ethernet/intel/ice/ice_main.c
++++ b/drivers/net/ethernet/intel/ice/ice_main.c
+@@ -3404,7 +3404,7 @@ static int ice_init_pf(struct ice_pf *pf)
+ 
+       pf->avail_rxqs = bitmap_zalloc(pf->max_pf_rxqs, GFP_KERNEL);
+       if (!pf->avail_rxqs) {
+-              devm_kfree(ice_pf_to_dev(pf), pf->avail_txqs);
++              bitmap_free(pf->avail_txqs);
+               pf->avail_txqs = NULL;
+               return -ENOMEM;
+       }
+diff --git a/drivers/net/wireless/intel/iwlegacy/4965-rs.c b/drivers/net/wireless/intel/iwlegacy/4965-rs.c
+index 532e3b91777d9..150805aec4071 100644
+--- a/drivers/net/wireless/intel/iwlegacy/4965-rs.c
++++ b/drivers/net/wireless/intel/iwlegacy/4965-rs.c
+@@ -2403,7 +2403,7 @@ il4965_rs_fill_link_cmd(struct il_priv *il, struct il_lq_sta *lq_sta,
+               /* Repeat initial/next rate.
+                * For legacy IL_NUMBER_TRY == 1, this loop will not execute.
+                * For HT IL_HT_NUMBER_TRY == 3, this executes twice. */
+-              while (repeat_rate > 0) {
++              while (repeat_rate > 0 && idx < (LINK_QUAL_MAX_RETRY_NUM - 1)) {
+                       if (is_legacy(tbl_type.lq_type)) {
+                               if (ant_toggle_cnt < NUM_TRY_BEFORE_ANT_TOGGLE)
+                                       ant_toggle_cnt++;
+@@ -2422,8 +2422,6 @@ il4965_rs_fill_link_cmd(struct il_priv *il, struct il_lq_sta *lq_sta,
+                           cpu_to_le32(new_rate);
+                       repeat_rate--;
+                       idx++;
+-                      if (idx >= LINK_QUAL_MAX_RETRY_NUM)
+-                              goto out;
+               }
+ 
+               il4965_rs_get_tbl_info_from_mcs(new_rate, lq_sta->band,
+@@ -2468,7 +2466,6 @@ il4965_rs_fill_link_cmd(struct il_priv *il, struct il_lq_sta *lq_sta,
+               repeat_rate--;
+       }
+ 
+-out:
+       lq_cmd->agg_params.agg_frame_cnt_limit = LINK_QUAL_AGG_FRAME_LIMIT_DEF;
+       lq_cmd->agg_params.agg_dis_start_th = LINK_QUAL_AGG_DISABLE_START_DEF;
+ 
+diff --git a/drivers/net/xen-netback/xenbus.c b/drivers/net/xen-netback/xenbus.c
+index ca261e0fc9c9b..9ee9ce0493fe6 100644
+--- a/drivers/net/xen-netback/xenbus.c
++++ b/drivers/net/xen-netback/xenbus.c
+@@ -256,7 +256,6 @@ static void backend_disconnect(struct backend_info *be)
+               unsigned int queue_index;
+ 
+               xen_unregister_watchers(vif);
+-              xenbus_rm(XBT_NIL, be->dev->nodename, "hotplug-status");
+ #ifdef CONFIG_DEBUG_FS
+               xenvif_debugfs_delif(vif);
+ #endif /* CONFIG_DEBUG_FS */
+@@ -984,6 +983,7 @@ static int netback_remove(struct xenbus_device *dev)
+       struct backend_info *be = dev_get_drvdata(&dev->dev);
+ 
+       unregister_hotplug_status_watch(be);
++      xenbus_rm(XBT_NIL, dev->nodename, "hotplug-status");
+       if (be->vif) {
+               kobject_uevent(&dev->dev.kobj, KOBJ_OFFLINE);
+               backend_disconnect(be);
+diff --git a/drivers/nvme/host/tcp.c b/drivers/nvme/host/tcp.c
+index fe8c27bbc3f20..57df87def8c33 100644
+--- a/drivers/nvme/host/tcp.c
++++ b/drivers/nvme/host/tcp.c
+@@ -118,7 +118,6 @@ struct nvme_tcp_queue {
+       struct mutex            send_mutex;
+       struct llist_head       req_list;
+       struct list_head        send_list;
+-      bool                    more_requests;
+ 
+       /* recv state */
+       void                    *pdu;
+@@ -314,7 +313,7 @@ static inline bool nvme_tcp_queue_more(struct nvme_tcp_queue *queue)
+ static inline bool nvme_tcp_queue_more(struct nvme_tcp_queue *queue)
+ {
+       return !list_empty(&queue->send_list) ||
+-              !llist_empty(&queue->req_list) || queue->more_requests;
++              !llist_empty(&queue->req_list);
+ }
+ 
+ static inline void nvme_tcp_queue_request(struct nvme_tcp_request *req,
+@@ -333,9 +332,7 @@ static inline void nvme_tcp_queue_request(struct nvme_tcp_request *req,
+        */
+       if (queue->io_cpu == raw_smp_processor_id() &&
+           sync && empty && mutex_trylock(&queue->send_mutex)) {
+-              queue->more_requests = !last;
+               nvme_tcp_send_all(queue);
+-              queue->more_requests = false;
+               mutex_unlock(&queue->send_mutex);
+       }
+ 
+@@ -1196,7 +1193,7 @@ static void nvme_tcp_io_work(struct work_struct *w)
+               else if (unlikely(result < 0))
+                       return;
+ 
+-              if (!pending)
++              if (!pending || !queue->rd_enabled)
+                       return;
+ 
+       } while (!time_after(jiffies, deadline)); /* quota is exhausted */
+diff --git a/drivers/nvme/target/core.c b/drivers/nvme/target/core.c
+index 9a8fa2e582d5b..bc88ff2912f56 100644
+--- a/drivers/nvme/target/core.c
++++ b/drivers/nvme/target/core.c
+@@ -730,6 +730,8 @@ static void nvmet_set_error(struct nvmet_req *req, u16 status)
+ 
+ static void __nvmet_req_complete(struct nvmet_req *req, u16 status)
+ {
++      struct nvmet_ns *ns = req->ns;
++
+       if (!req->sq->sqhd_disabled)
+               nvmet_update_sq_head(req);
+       req->cqe->sq_id = cpu_to_le16(req->sq->qid);
+@@ -740,9 +742,9 @@ static void __nvmet_req_complete(struct nvmet_req *req, u16 status)
+ 
+       trace_nvmet_req_complete(req);
+ 
+-      if (req->ns)
+-              nvmet_put_namespace(req->ns);
+       req->ops->queue_response(req);
++      if (ns)
++              nvmet_put_namespace(ns);
+ }
+ 
+ void nvmet_req_complete(struct nvmet_req *req, u16 status)
+diff --git a/drivers/parisc/ccio-dma.c b/drivers/parisc/ccio-dma.c
+index b916fab9b1618..ffd5000c23d39 100644
+--- a/drivers/parisc/ccio-dma.c
++++ b/drivers/parisc/ccio-dma.c
+@@ -1380,15 +1380,17 @@ ccio_init_resource(struct resource *res, char *name, void __iomem *ioaddr)
+       }
+ }
+ 
+-static void __init ccio_init_resources(struct ioc *ioc)
++static int __init ccio_init_resources(struct ioc *ioc)
+ {
+       struct resource *res = ioc->mmio_region;
+       char *name = kmalloc(14, GFP_KERNEL);
+-
++      if (unlikely(!name))
++              return -ENOMEM;
+       snprintf(name, 14, "GSC Bus [%d/]", ioc->hw_path);
+ 
+       ccio_init_resource(res, name, &ioc->ioc_regs->io_io_low);
+       ccio_init_resource(res + 1, name, &ioc->ioc_regs->io_io_low_hv);
++      return 0;
+ }
+ 
+ static int new_ioc_area(struct resource *res, unsigned long size,
+@@ -1543,7 +1545,10 @@ static int __init ccio_probe(struct parisc_device *dev)
+               return -ENOMEM;
+       }
+       ccio_ioc_init(ioc);
+-      ccio_init_resources(ioc);
++      if (ccio_init_resources(ioc)) {
++              kfree(ioc);
++              return -ENOMEM;
++      }
+       hppa_dma_ops = &ccio_ops;
+ 
+       hba = kzalloc(sizeof(*hba), GFP_KERNEL);
+diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c
+index 6e3f3511e7ddd..317d701487ecd 100644
+--- a/drivers/regulator/core.c
++++ b/drivers/regulator/core.c
+@@ -2596,13 +2596,18 @@ static int _regulator_do_enable(struct regulator_dev *rdev)
+  */
+ static int _regulator_handle_consumer_enable(struct regulator *regulator)
+ {
++      int ret;
+       struct regulator_dev *rdev = regulator->rdev;
+ 
+       lockdep_assert_held_once(&rdev->mutex.base);
+ 
+       regulator->enable_count++;
+-      if (regulator->uA_load && regulator->enable_count == 1)
+-              return drms_uA_update(rdev);
++      if (regulator->uA_load && regulator->enable_count == 1) {
++              ret = drms_uA_update(rdev);
++              if (ret)
++                      regulator->enable_count--;
++              return ret;
++      }
+ 
+       return 0;
+ }
+diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
+index 134e4ee5dc481..17200b453cbbb 100644
+--- a/drivers/scsi/lpfc/lpfc_init.c
++++ b/drivers/scsi/lpfc/lpfc_init.c
+@@ -6670,7 +6670,7 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
+       /* Allocate device driver memory */
+       rc = lpfc_mem_alloc(phba, SGL_ALIGN_SZ);
+       if (rc)
+-              return -ENOMEM;
++              goto out_destroy_workqueue;
+ 
+       /* IF Type 2 ports get initialized now. */
+       if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) >=
+@@ -7076,6 +7076,9 @@ out_free_bsmbx:
+       lpfc_destroy_bootstrap_mbox(phba);
+ out_free_mem:
+       lpfc_mem_free(phba);
++out_destroy_workqueue:
++      destroy_workqueue(phba->wq);
++      phba->wq = NULL;
+       return rc;
+ }
+ 
+diff --git a/drivers/scsi/megaraid/megaraid_sas_fusion.c b/drivers/scsi/megaraid/megaraid_sas_fusion.c
+index 13022a42fd6f4..7838c7911adde 100644
+--- a/drivers/scsi/megaraid/megaraid_sas_fusion.c
++++ b/drivers/scsi/megaraid/megaraid_sas_fusion.c
+@@ -5198,7 +5198,6 @@ megasas_alloc_fusion_context(struct megasas_instance *instance)
+               if (!fusion->log_to_span) {
+                       dev_err(&instance->pdev->dev, "Failed from %s %d\n",
+                               __func__, __LINE__);
+-                      kfree(instance->ctrl_context);
+                       return -ENOMEM;
+               }
+       }
+diff --git a/drivers/scsi/mpt3sas/mpt3sas_scsih.c b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
+index 8418b59b3743b..c3a5978b0efac 100644
+--- a/drivers/scsi/mpt3sas/mpt3sas_scsih.c
++++ b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
+@@ -3501,6 +3501,7 @@ static struct fw_event_work *dequeue_next_fw_event(struct MPT3SAS_ADAPTER *ioc)
+               fw_event = list_first_entry(&ioc->fw_event_list,
+                               struct fw_event_work, list);
+               list_del_init(&fw_event->list);
++              fw_event_work_put(fw_event);
+       }
+       spin_unlock_irqrestore(&ioc->fw_event_lock, flags);
+ 
+@@ -3559,7 +3560,6 @@ _scsih_fw_event_cleanup_queue(struct MPT3SAS_ADAPTER *ioc)
+               if (cancel_work_sync(&fw_event->work))
+                       fw_event_work_put(fw_event);
+ 
+-              fw_event_work_put(fw_event);
+       }
+       ioc->fw_events_cleanup = 0;
+ }
+diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c
+index ba823e8eb902b..ecb30c2738b8b 100644
+--- a/drivers/scsi/qla2xxx/qla_target.c
++++ b/drivers/scsi/qla2xxx/qla_target.c
+@@ -6817,14 +6817,8 @@ qlt_24xx_config_rings(struct scsi_qla_host *vha)
+ 
+       if (ha->flags.msix_enabled) {
+               if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
+-                      if (IS_QLA2071(ha)) {
+-                              /* 4 ports Baker: Enable Interrupt Handshake */
+-                              icb->msix_atio = 0;
+-                              icb->firmware_options_2 |= cpu_to_le32(BIT_26);
+-                      } else {
+-                              icb->msix_atio = cpu_to_le16(msix->entry);
+-                              icb->firmware_options_2 &= cpu_to_le32(~BIT_26);
+-                      }
++                      icb->msix_atio = cpu_to_le16(msix->entry);
++                      icb->firmware_options_2 &= cpu_to_le32(~BIT_26);
+                       ql_dbg(ql_dbg_init, vha, 0xf072,
+                           "Registering ICB vector 0x%x for atio que.\n",
+                           msix->entry);
+diff --git a/drivers/soc/bcm/brcmstb/pm/pm-arm.c b/drivers/soc/bcm/brcmstb/pm/pm-arm.c
+index c6ec7d95bcfcc..722fd54e537cf 100644
+--- a/drivers/soc/bcm/brcmstb/pm/pm-arm.c
++++ b/drivers/soc/bcm/brcmstb/pm/pm-arm.c
+@@ -681,13 +681,14 @@ static int brcmstb_pm_probe(struct platform_device *pdev)
+       const struct of_device_id *of_id = NULL;
+       struct device_node *dn;
+       void __iomem *base;
+-      int ret, i;
++      int ret, i, s;
+ 
+       /* AON ctrl registers */
+       base = brcmstb_ioremap_match(aon_ctrl_dt_ids, 0, NULL);
+       if (IS_ERR(base)) {
+               pr_err("error mapping AON_CTRL\n");
+-              return PTR_ERR(base);
++              ret = PTR_ERR(base);
++              goto aon_err;
+       }
+       ctrl.aon_ctrl_base = base;
+ 
+@@ -697,8 +698,10 @@ static int brcmstb_pm_probe(struct platform_device *pdev)
+               /* Assume standard offset */
+               ctrl.aon_sram = ctrl.aon_ctrl_base +
+                                    AON_CTRL_SYSTEM_DATA_RAM_OFS;
++              s = 0;
+       } else {
+               ctrl.aon_sram = base;
++              s = 1;
+       }
+ 
+       writel_relaxed(0, ctrl.aon_sram + AON_REG_PANIC);
+@@ -708,7 +711,8 @@ static int brcmstb_pm_probe(struct platform_device *pdev)
+                                    (const void **)&ddr_phy_data);
+       if (IS_ERR(base)) {
+               pr_err("error mapping DDR PHY\n");
+-              return PTR_ERR(base);
++              ret = PTR_ERR(base);
++              goto ddr_phy_err;
+       }
+       ctrl.support_warm_boot = ddr_phy_data->supports_warm_boot;
+       ctrl.pll_status_offset = ddr_phy_data->pll_status_offset;
+@@ -728,17 +732,20 @@ static int brcmstb_pm_probe(struct platform_device *pdev)
+       for_each_matching_node(dn, ddr_shimphy_dt_ids) {
+               i = ctrl.num_memc;
+               if (i >= MAX_NUM_MEMC) {
++                      of_node_put(dn);
+                       pr_warn("too many MEMCs (max %d)\n", MAX_NUM_MEMC);
+                       break;
+               }
+ 
+               base = of_io_request_and_map(dn, 0, dn->full_name);
+               if (IS_ERR(base)) {
++                      of_node_put(dn);
+                       if (!ctrl.support_warm_boot)
+                               break;
+ 
+                       pr_err("error mapping DDR SHIMPHY %d\n", i);
+-                      return PTR_ERR(base);
++                      ret = PTR_ERR(base);
++                      goto ddr_shimphy_err;
+               }
+               ctrl.memcs[i].ddr_shimphy_base = base;
+               ctrl.num_memc++;
+@@ -749,14 +756,18 @@ static int brcmstb_pm_probe(struct platform_device *pdev)
+       for_each_matching_node(dn, brcmstb_memc_of_match) {
+               base = of_iomap(dn, 0);
+               if (!base) {
++                      of_node_put(dn);
+                       pr_err("error mapping DDR Sequencer %d\n", i);
+-                      return -ENOMEM;
++                      ret = -ENOMEM;
++                      goto brcmstb_memc_err;
+               }
+ 
+               of_id = of_match_node(brcmstb_memc_of_match, dn);
+               if (!of_id) {
+                       iounmap(base);
+-                      return -EINVAL;
++                      of_node_put(dn);
++                      ret = -EINVAL;
++                      goto brcmstb_memc_err;
+               }
+ 
+               ddr_seq_data = of_id->data;
+@@ -776,21 +787,24 @@ static int brcmstb_pm_probe(struct platform_device *pdev)
+       dn = of_find_matching_node(NULL, sram_dt_ids);
+       if (!dn) {
+               pr_err("SRAM not found\n");
+-              return -EINVAL;
++              ret = -EINVAL;
++              goto brcmstb_memc_err;
+       }
+ 
+       ret = brcmstb_init_sram(dn);
+       of_node_put(dn);
+       if (ret) {
+               pr_err("error setting up SRAM for PM\n");
+-              return ret;
++              goto brcmstb_memc_err;
+       }
+ 
+       ctrl.pdev = pdev;
+ 
+       ctrl.s3_params = kmalloc(sizeof(*ctrl.s3_params), GFP_KERNEL);
+-      if (!ctrl.s3_params)
+-              return -ENOMEM;
++      if (!ctrl.s3_params) {
++              ret = -ENOMEM;
++              goto s3_params_err;
++      }
+       ctrl.s3_params_pa = dma_map_single(&pdev->dev, ctrl.s3_params,
+                                          sizeof(*ctrl.s3_params),
+                                          DMA_TO_DEVICE);
+@@ -810,7 +824,21 @@ static int brcmstb_pm_probe(struct platform_device *pdev)
+ 
+ out:
+       kfree(ctrl.s3_params);
+-
++s3_params_err:
++      iounmap(ctrl.boot_sram);
++brcmstb_memc_err:
++      for (i--; i >= 0; i--)
++              iounmap(ctrl.memcs[i].ddr_ctrl);
++ddr_shimphy_err:
++      for (i = 0; i < ctrl.num_memc; i++)
++              iounmap(ctrl.memcs[i].ddr_shimphy_base);
++
++      iounmap(ctrl.memcs[0].ddr_phy_base);
++ddr_phy_err:
++      iounmap(ctrl.aon_ctrl_base);
++      if (s)
++              iounmap(ctrl.aon_sram);
++aon_err:
+       pr_warn("PM: initialization failed with code %d\n", ret);
+ 
+       return ret;
+diff --git a/drivers/tee/tee_shm.c b/drivers/tee/tee_shm.c
+index 499fccba3d74b..6fb4400333fb4 100644
+--- a/drivers/tee/tee_shm.c
++++ b/drivers/tee/tee_shm.c
+@@ -9,6 +9,7 @@
+ #include <linux/sched.h>
+ #include <linux/slab.h>
+ #include <linux/tee_drv.h>
++#include <linux/uaccess.h>
+ #include <linux/uio.h>
+ #include "tee_private.h"
+ 
+diff --git a/drivers/tty/n_gsm.c b/drivers/tty/n_gsm.c
+index cb5ed4155a8d2..c91a3004931f1 100644
+--- a/drivers/tty/n_gsm.c
++++ b/drivers/tty/n_gsm.c
+@@ -235,7 +235,7 @@ struct gsm_mux {
+       int old_c_iflag;                /* termios c_iflag value before attach 
*/
+       bool constipated;               /* Asked by remote to shut up */
+ 
+-      spinlock_t tx_lock;
++      struct mutex tx_mutex;
+       unsigned int tx_bytes;          /* TX data outstanding */
+ #define TX_THRESH_HI          8192
+ #define TX_THRESH_LO          2048
+@@ -820,15 +820,14 @@ static void __gsm_data_queue(struct gsm_dlci *dlci, 
struct gsm_msg *msg)
+  *
+  *    Add data to the transmit queue and try and get stuff moving
+  *    out of the mux tty if not already doing so. Take the
+- *    the gsm tx lock and dlci lock.
++ *    the gsm tx mutex and dlci lock.
+  */
+ 
+ static void gsm_data_queue(struct gsm_dlci *dlci, struct gsm_msg *msg)
+ {
+-      unsigned long flags;
+-      spin_lock_irqsave(&dlci->gsm->tx_lock, flags);
++      mutex_lock(&dlci->gsm->tx_mutex);
+       __gsm_data_queue(dlci, msg);
+-      spin_unlock_irqrestore(&dlci->gsm->tx_lock, flags);
++      mutex_unlock(&dlci->gsm->tx_mutex);
+ }
+ 
+ /**
+@@ -840,7 +839,7 @@ static void gsm_data_queue(struct gsm_dlci *dlci, struct 
gsm_msg *msg)
+  *    is data. Keep to the MRU of the mux. This path handles the usual tty
+  *    interface which is a byte stream with optional modem data.
+  *
+- *    Caller must hold the tx_lock of the mux.
++ *    Caller must hold the tx_mutex of the mux.
+  */
+ 
+ static int gsm_dlci_data_output(struct gsm_mux *gsm, struct gsm_dlci *dlci)
+@@ -903,7 +902,7 @@ static int gsm_dlci_data_output(struct gsm_mux *gsm, 
struct gsm_dlci *dlci)
+  *    is data. Keep to the MRU of the mux. This path handles framed data
+  *    queued as skbuffs to the DLCI.
+  *
+- *    Caller must hold the tx_lock of the mux.
++ *    Caller must hold the tx_mutex of the mux.
+  */
+ 
+ static int gsm_dlci_data_output_framed(struct gsm_mux *gsm,
+@@ -919,7 +918,7 @@ static int gsm_dlci_data_output_framed(struct gsm_mux *gsm,
+       if (dlci->adaption == 4)
+               overhead = 1;
+ 
+-      /* dlci->skb is locked by tx_lock */
++      /* dlci->skb is locked by tx_mutex */
+       if (dlci->skb == NULL) {
+               dlci->skb = skb_dequeue_tail(&dlci->skb_list);
+               if (dlci->skb == NULL)
+@@ -1019,13 +1018,12 @@ static void gsm_dlci_data_sweep(struct gsm_mux *gsm)
+ 
+ static void gsm_dlci_data_kick(struct gsm_dlci *dlci)
+ {
+-      unsigned long flags;
+       int sweep;
+ 
+       if (dlci->constipated)
+               return;
+ 
+-      spin_lock_irqsave(&dlci->gsm->tx_lock, flags);
++      mutex_lock(&dlci->gsm->tx_mutex);
+       /* If we have nothing running then we need to fire up */
+       sweep = (dlci->gsm->tx_bytes < TX_THRESH_LO);
+       if (dlci->gsm->tx_bytes == 0) {
+@@ -1036,7 +1034,7 @@ static void gsm_dlci_data_kick(struct gsm_dlci *dlci)
+       }
+       if (sweep)
+               gsm_dlci_data_sweep(dlci->gsm);
+-      spin_unlock_irqrestore(&dlci->gsm->tx_lock, flags);
++      mutex_unlock(&dlci->gsm->tx_mutex);
+ }
+ 
+ /*
+@@ -1258,7 +1256,6 @@ static void gsm_control_message(struct gsm_mux *gsm, 
unsigned int command,
+                                               const u8 *data, int clen)
+ {
+       u8 buf[1];
+-      unsigned long flags;
+ 
+       switch (command) {
+       case CMD_CLD: {
+@@ -1280,9 +1277,9 @@ static void gsm_control_message(struct gsm_mux *gsm, 
unsigned int command,
+               gsm->constipated = false;
+               gsm_control_reply(gsm, CMD_FCON, NULL, 0);
+               /* Kick the link in case it is idling */
+-              spin_lock_irqsave(&gsm->tx_lock, flags);
++              mutex_lock(&gsm->tx_mutex);
+               gsm_data_kick(gsm, NULL);
+-              spin_unlock_irqrestore(&gsm->tx_lock, flags);
++              mutex_unlock(&gsm->tx_mutex);
+               break;
+       case CMD_FCOFF:
+               /* Modem wants us to STFU */
+@@ -2200,11 +2197,6 @@ static int gsm_activate_mux(struct gsm_mux *gsm)
+ {
+       struct gsm_dlci *dlci;
+ 
+-      timer_setup(&gsm->t2_timer, gsm_control_retransmit, 0);
+-      init_waitqueue_head(&gsm->event);
+-      spin_lock_init(&gsm->control_lock);
+-      spin_lock_init(&gsm->tx_lock);
+-
+       if (gsm->encoding == 0)
+               gsm->receive = gsm0_receive;
+       else
+@@ -2233,6 +2225,7 @@ static void gsm_free_mux(struct gsm_mux *gsm)
+                       break;
+               }
+       }
++      mutex_destroy(&gsm->tx_mutex);
+       mutex_destroy(&gsm->mutex);
+       kfree(gsm->txframe);
+       kfree(gsm->buf);
+@@ -2304,8 +2297,12 @@ static struct gsm_mux *gsm_alloc_mux(void)
+       }
+       spin_lock_init(&gsm->lock);
+       mutex_init(&gsm->mutex);
++      mutex_init(&gsm->tx_mutex);
+       kref_init(&gsm->ref);
+       INIT_LIST_HEAD(&gsm->tx_list);
++      timer_setup(&gsm->t2_timer, gsm_control_retransmit, 0);
++      init_waitqueue_head(&gsm->event);
++      spin_lock_init(&gsm->control_lock);
+ 
+       gsm->t1 = T1;
+       gsm->t2 = T2;
+@@ -2330,6 +2327,7 @@ static struct gsm_mux *gsm_alloc_mux(void)
+       }
+       spin_unlock(&gsm_mux_lock);
+       if (i == MAX_MUX) {
++              mutex_destroy(&gsm->tx_mutex);
+               mutex_destroy(&gsm->mutex);
+               kfree(gsm->txframe);
+               kfree(gsm->buf);
+@@ -2654,16 +2652,15 @@ static int gsmld_open(struct tty_struct *tty)
+ static void gsmld_write_wakeup(struct tty_struct *tty)
+ {
+       struct gsm_mux *gsm = tty->disc_data;
+-      unsigned long flags;
+ 
+       /* Queue poll */
+       clear_bit(TTY_DO_WRITE_WAKEUP, &tty->flags);
+-      spin_lock_irqsave(&gsm->tx_lock, flags);
++      mutex_lock(&gsm->tx_mutex);
+       gsm_data_kick(gsm, NULL);
+       if (gsm->tx_bytes < TX_THRESH_LO) {
+               gsm_dlci_data_sweep(gsm);
+       }
+-      spin_unlock_irqrestore(&gsm->tx_lock, flags);
++      mutex_unlock(&gsm->tx_mutex);
+ }
+ 
+ /**
+@@ -2706,7 +2703,6 @@ static ssize_t gsmld_write(struct tty_struct *tty, 
struct file *file,
+                          const unsigned char *buf, size_t nr)
+ {
+       struct gsm_mux *gsm = tty->disc_data;
+-      unsigned long flags;
+       int space;
+       int ret;
+ 
+@@ -2714,13 +2710,13 @@ static ssize_t gsmld_write(struct tty_struct *tty, 
struct file *file,
+               return -ENODEV;
+ 
+       ret = -ENOBUFS;
+-      spin_lock_irqsave(&gsm->tx_lock, flags);
++      mutex_lock(&gsm->tx_mutex);
+       space = tty_write_room(tty);
+       if (space >= nr)
+               ret = tty->ops->write(tty, buf, nr);
+       else
+               set_bit(TTY_DO_WRITE_WAKEUP, &tty->flags);
+-      spin_unlock_irqrestore(&gsm->tx_lock, flags);
++      mutex_unlock(&gsm->tx_mutex);
+ 
+       return ret;
+ }
+diff --git a/drivers/video/fbdev/chipsfb.c b/drivers/video/fbdev/chipsfb.c
+index 393894af26f84..2b00a9d554fc0 100644
+--- a/drivers/video/fbdev/chipsfb.c
++++ b/drivers/video/fbdev/chipsfb.c
+@@ -430,6 +430,7 @@ static int chipsfb_pci_init(struct pci_dev *dp, const 
struct pci_device_id *ent)
+  err_release_fb:
+       framebuffer_release(p);
+  err_disable:
++      pci_disable_device(dp);
+  err_out:
+       return rc;
+ }
+diff --git a/fs/afs/flock.c b/fs/afs/flock.c
+index cb3054c7843ea..466ad609f2057 100644
+--- a/fs/afs/flock.c
++++ b/fs/afs/flock.c
+@@ -76,7 +76,7 @@ void afs_lock_op_done(struct afs_call *call)
+       if (call->error == 0) {
+               spin_lock(&vnode->lock);
+               trace_afs_flock_ev(vnode, NULL, afs_flock_timestamp, 0);
+-              vnode->locked_at = call->reply_time;
++              vnode->locked_at = call->issue_time;
+               afs_schedule_lock_extension(vnode);
+               spin_unlock(&vnode->lock);
+       }
+diff --git a/fs/afs/fsclient.c b/fs/afs/fsclient.c
+index 1d95ed9dd86e6..0048a32cb040e 100644
+--- a/fs/afs/fsclient.c
++++ b/fs/afs/fsclient.c
+@@ -130,7 +130,7 @@ bad:
+ 
+ static time64_t xdr_decode_expiry(struct afs_call *call, u32 expiry)
+ {
+-      return ktime_divns(call->reply_time, NSEC_PER_SEC) + expiry;
++      return ktime_divns(call->issue_time, NSEC_PER_SEC) + expiry;
+ }
+ 
+ static void xdr_decode_AFSCallBack(const __be32 **_bp,
+diff --git a/fs/afs/internal.h b/fs/afs/internal.h
+index dc08a3d9b3a8b..637cbe549397c 100644
+--- a/fs/afs/internal.h
++++ b/fs/afs/internal.h
+@@ -135,7 +135,6 @@ struct afs_call {
+       bool                    need_attention; /* T if RxRPC poked us */
+       bool                    async;          /* T if asynchronous */
+       bool                    upgrade;        /* T to request service upgrade 
*/
+-      bool                    have_reply_time; /* T if have got reply_time */
+       bool                    intr;           /* T if interruptible */
+       bool                    unmarshalling_error; /* T if an unmarshalling 
error occurred */
+       u16                     service_id;     /* Actual service ID (after 
upgrade) */
+@@ -149,7 +148,7 @@ struct afs_call {
+               } __attribute__((packed));
+               __be64          tmp64;
+       };
+-      ktime_t                 reply_time;     /* Time of first reply packet */
++      ktime_t                 issue_time;     /* Time of issue of operation */
+ };
+ 
+ struct afs_call_type {
+diff --git a/fs/afs/rxrpc.c b/fs/afs/rxrpc.c
+index efe0fb3ad8bdc..535d28b44bca3 100644
+--- a/fs/afs/rxrpc.c
++++ b/fs/afs/rxrpc.c
+@@ -429,6 +429,7 @@ void afs_make_call(struct afs_addr_cursor *ac, struct 
afs_call *call, gfp_t gfp)
+       if (call->max_lifespan)
+               rxrpc_kernel_set_max_life(call->net->socket, rxcall,
+                                         call->max_lifespan);
++      call->issue_time = ktime_get_real();
+ 
+       /* send the request */
+       iov[0].iov_base = call->request;
+@@ -533,12 +534,6 @@ static void afs_deliver_to_call(struct afs_call *call)
+                       return;
+               }
+ 
+-              if (!call->have_reply_time &&
+-                  rxrpc_kernel_get_reply_time(call->net->socket,
+-                                              call->rxcall,
+-                                              &call->reply_time))
+-                      call->have_reply_time = true;
+-
+               ret = call->type->deliver(call);
+               state = READ_ONCE(call->state);
+               if (ret == 0 && call->unmarshalling_error)
+diff --git a/fs/afs/yfsclient.c b/fs/afs/yfsclient.c
+index bd787e71a657f..5b2ef5ffd716f 100644
+--- a/fs/afs/yfsclient.c
++++ b/fs/afs/yfsclient.c
+@@ -239,8 +239,7 @@ static void xdr_decode_YFSCallBack(const __be32 **_bp,
+       struct afs_callback *cb = &scb->callback;
+       ktime_t cb_expiry;
+ 
+-      cb_expiry = call->reply_time;
+-      cb_expiry = ktime_add(cb_expiry, xdr_to_u64(x->expiration_time) * 100);
++      cb_expiry = ktime_add(call->issue_time, xdr_to_u64(x->expiration_time) 
* 100);
+       cb->expires_at  = ktime_divns(cb_expiry, NSEC_PER_SEC);
+       scb->have_cb    = true;
+       *_bp += xdr_size(x);
+diff --git a/fs/cifs/smb2file.c b/fs/cifs/smb2file.c
+index 2fa3ba354cc96..001c26daacbaa 100644
+--- a/fs/cifs/smb2file.c
++++ b/fs/cifs/smb2file.c
+@@ -74,7 +74,6 @@ smb2_open_file(const unsigned int xid, struct 
cifs_open_parms *oparms,
+               nr_ioctl_req.Reserved = 0;
+               rc = SMB2_ioctl(xid, oparms->tcon, fid->persistent_fid,
+                       fid->volatile_fid, FSCTL_LMR_REQUEST_RESILIENCY,
+-                      true /* is_fsctl */,
+                       (char *)&nr_ioctl_req, sizeof(nr_ioctl_req),
+                       CIFSMaxBufSize, NULL, NULL /* no return info */);
+               if (rc == -EOPNOTSUPP) {
+diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c
+index b6d72e3c5ebad..11efd5289ec43 100644
+--- a/fs/cifs/smb2ops.c
++++ b/fs/cifs/smb2ops.c
+@@ -587,7 +587,7 @@ SMB3_request_interfaces(const unsigned int xid, struct 
cifs_tcon *tcon)
+       struct cifs_ses *ses = tcon->ses;
+ 
+       rc = SMB2_ioctl(xid, tcon, NO_FILE_ID, NO_FILE_ID,
+-                      FSCTL_QUERY_NETWORK_INTERFACE_INFO, true /* is_fsctl */,
++                      FSCTL_QUERY_NETWORK_INTERFACE_INFO,
+                       NULL /* no data input */, 0 /* no data input */,
+                       CIFSMaxBufSize, (char **)&out_buf, &ret_data_len);
+       if (rc == -EOPNOTSUPP) {
+@@ -1470,9 +1470,8 @@ SMB2_request_res_key(const unsigned int xid, struct 
cifs_tcon *tcon,
+       struct resume_key_req *res_key;
+ 
+       rc = SMB2_ioctl(xid, tcon, persistent_fid, volatile_fid,
+-                      FSCTL_SRV_REQUEST_RESUME_KEY, true /* is_fsctl */,
+-                      NULL, 0 /* no input */, CIFSMaxBufSize,
+-                      (char **)&res_key, &ret_data_len);
++                      FSCTL_SRV_REQUEST_RESUME_KEY, NULL, 0 /* no input */,
++                      CIFSMaxBufSize, (char **)&res_key, &ret_data_len);
+ 
+       if (rc) {
+               cifs_tcon_dbg(VFS, "refcpy ioctl error %d getting resume 
key\n", rc);
+@@ -1611,7 +1610,7 @@ smb2_ioctl_query_info(const unsigned int xid,
+               rqst[1].rq_nvec = SMB2_IOCTL_IOV_SIZE;
+ 
+               rc = SMB2_ioctl_init(tcon, server, &rqst[1], COMPOUND_FID, 
COMPOUND_FID,
+-                                   qi.info_type, true, buffer, 
qi.output_buffer_length,
++                                   qi.info_type, buffer, 
qi.output_buffer_length,
+                                    CIFSMaxBufSize - 
MAX_SMB2_CREATE_RESPONSE_SIZE -
+                                    MAX_SMB2_CLOSE_RESPONSE_SIZE);
+               free_req1_func = SMB2_ioctl_free;
+@@ -1787,9 +1786,8 @@ smb2_copychunk_range(const unsigned int xid,
+               retbuf = NULL;
+               rc = SMB2_ioctl(xid, tcon, trgtfile->fid.persistent_fid,
+                       trgtfile->fid.volatile_fid, FSCTL_SRV_COPYCHUNK_WRITE,
+-                      true /* is_fsctl */, (char *)pcchunk,
+-                      sizeof(struct copychunk_ioctl), CIFSMaxBufSize,
+-                      (char **)&retbuf, &ret_data_len);
++                      (char *)pcchunk, sizeof(struct copychunk_ioctl),
++                      CIFSMaxBufSize, (char **)&retbuf, &ret_data_len);
+               if (rc == 0) {
+                       if (ret_data_len !=
+                                       sizeof(struct copychunk_ioctl_rsp)) {
+@@ -1949,7 +1947,6 @@ static bool smb2_set_sparse(const unsigned int xid, 
struct cifs_tcon *tcon,
+ 
+       rc = SMB2_ioctl(xid, tcon, cfile->fid.persistent_fid,
+                       cfile->fid.volatile_fid, FSCTL_SET_SPARSE,
+-                      true /* is_fctl */,
+                       &setsparse, 1, CIFSMaxBufSize, NULL, NULL);
+       if (rc) {
+               tcon->broken_sparse_sup = true;
+@@ -2032,7 +2029,6 @@ smb2_duplicate_extents(const unsigned int xid,
+       rc = SMB2_ioctl(xid, tcon, trgtfile->fid.persistent_fid,
+                       trgtfile->fid.volatile_fid,
+                       FSCTL_DUPLICATE_EXTENTS_TO_FILE,
+-                      true /* is_fsctl */,
+                       (char *)&dup_ext_buf,
+                       sizeof(struct duplicate_extents_to_file),
+                       CIFSMaxBufSize, NULL,
+@@ -2067,7 +2063,6 @@ smb3_set_integrity(const unsigned int xid, struct 
cifs_tcon *tcon,
+       return SMB2_ioctl(xid, tcon, cfile->fid.persistent_fid,
+                       cfile->fid.volatile_fid,
+                       FSCTL_SET_INTEGRITY_INFORMATION,
+-                      true /* is_fsctl */,
+                       (char *)&integr_info,
+                       sizeof(struct fsctl_set_integrity_information_req),
+                       CIFSMaxBufSize, NULL,
+@@ -2120,7 +2115,6 @@ smb3_enum_snapshots(const unsigned int xid, struct 
cifs_tcon *tcon,
+       rc = SMB2_ioctl(xid, tcon, cfile->fid.persistent_fid,
+                       cfile->fid.volatile_fid,
+                       FSCTL_SRV_ENUMERATE_SNAPSHOTS,
+-                      true /* is_fsctl */,
+                       NULL, 0 /* no input data */, max_response_size,
+                       (char **)&retbuf,
+                       &ret_data_len);
+@@ -2762,7 +2756,6 @@ smb2_get_dfs_refer(const unsigned int xid, struct 
cifs_ses *ses,
+       do {
+               rc = SMB2_ioctl(xid, tcon, NO_FILE_ID, NO_FILE_ID,
+                               FSCTL_DFS_GET_REFERRALS,
+-                              true /* is_fsctl */,
+                               (char *)dfs_req, dfs_req_size, CIFSMaxBufSize,
+                               (char **)&dfs_rsp, &dfs_rsp_size);
+       } while (rc == -EAGAIN);
+@@ -2964,8 +2957,7 @@ smb2_query_symlink(const unsigned int xid, struct 
cifs_tcon *tcon,
+ 
+       rc = SMB2_ioctl_init(tcon, server,
+                            &rqst[1], fid.persistent_fid,
+-                           fid.volatile_fid, FSCTL_GET_REPARSE_POINT,
+-                           true /* is_fctl */, NULL, 0,
++                           fid.volatile_fid, FSCTL_GET_REPARSE_POINT, NULL, 0,
+                            CIFSMaxBufSize -
+                            MAX_SMB2_CREATE_RESPONSE_SIZE -
+                            MAX_SMB2_CLOSE_RESPONSE_SIZE);
+@@ -3145,8 +3137,7 @@ smb2_query_reparse_tag(const unsigned int xid, struct 
cifs_tcon *tcon,
+ 
+       rc = SMB2_ioctl_init(tcon, server,
+                            &rqst[1], COMPOUND_FID,
+-                           COMPOUND_FID, FSCTL_GET_REPARSE_POINT,
+-                           true /* is_fctl */, NULL, 0,
++                           COMPOUND_FID, FSCTL_GET_REPARSE_POINT, NULL, 0,
+                            CIFSMaxBufSize -
+                            MAX_SMB2_CREATE_RESPONSE_SIZE -
+                            MAX_SMB2_CLOSE_RESPONSE_SIZE);
+@@ -3409,7 +3400,7 @@ static long smb3_zero_range(struct file *file, struct 
cifs_tcon *tcon,
+       fsctl_buf.BeyondFinalZero = cpu_to_le64(offset + len);
+ 
+       rc = SMB2_ioctl(xid, tcon, cfile->fid.persistent_fid,
+-                      cfile->fid.volatile_fid, FSCTL_SET_ZERO_DATA, true,
++                      cfile->fid.volatile_fid, FSCTL_SET_ZERO_DATA,
+                       (char *)&fsctl_buf,
+                       sizeof(struct file_zero_data_information),
+                       0, NULL, NULL);
+@@ -3439,7 +3430,7 @@ static long smb3_zero_range(struct file *file, struct 
cifs_tcon *tcon,
+ static long smb3_punch_hole(struct file *file, struct cifs_tcon *tcon,
+                           loff_t offset, loff_t len)
+ {
+-      struct inode *inode;
++      struct inode *inode = file_inode(file);
+       struct cifsFileInfo *cfile = file->private_data;
+       struct file_zero_data_information fsctl_buf;
+       long rc;
+@@ -3448,14 +3439,12 @@ static long smb3_punch_hole(struct file *file, struct 
cifs_tcon *tcon,
+ 
+       xid = get_xid();
+ 
+-      inode = d_inode(cfile->dentry);
+-
++      inode_lock(inode);
+       /* Need to make file sparse, if not already, before freeing range. */
+       /* Consider adding equivalent for compressed since it could also work */
+       if (!smb2_set_sparse(xid, tcon, cfile, inode, set_sparse)) {
+               rc = -EOPNOTSUPP;
+-              free_xid(xid);
+-              return rc;
++              goto out;
+       }
+ 
+       /*
+@@ -3471,9 +3460,11 @@ static long smb3_punch_hole(struct file *file, struct 
cifs_tcon *tcon,
+ 
+       rc = SMB2_ioctl(xid, tcon, cfile->fid.persistent_fid,
+                       cfile->fid.volatile_fid, FSCTL_SET_ZERO_DATA,
+-                      true /* is_fctl */, (char *)&fsctl_buf,
++                      (char *)&fsctl_buf,
+                       sizeof(struct file_zero_data_information),
+                       CIFSMaxBufSize, NULL, NULL);
++out:
++      inode_unlock(inode);
+       free_xid(xid);
+       return rc;
+ }
+@@ -3530,7 +3521,7 @@ static int smb3_simple_fallocate_range(unsigned int xid,
+       in_data.length = cpu_to_le64(len);
+       rc = SMB2_ioctl(xid, tcon, cfile->fid.persistent_fid,
+                       cfile->fid.volatile_fid,
+-                      FSCTL_QUERY_ALLOCATED_RANGES, true,
++                      FSCTL_QUERY_ALLOCATED_RANGES,
+                       (char *)&in_data, sizeof(in_data),
+                       1024 * sizeof(struct file_allocated_range_buffer),
+                       (char **)&out_data, &out_data_len);
+@@ -3771,7 +3762,7 @@ static loff_t smb3_llseek(struct file *file, struct 
cifs_tcon *tcon, loff_t offs
+ 
+       rc = SMB2_ioctl(xid, tcon, cfile->fid.persistent_fid,
+                       cfile->fid.volatile_fid,
+-                      FSCTL_QUERY_ALLOCATED_RANGES, true,
++                      FSCTL_QUERY_ALLOCATED_RANGES,
+                       (char *)&in_data, sizeof(in_data),
+                       sizeof(struct file_allocated_range_buffer),
+                       (char **)&out_data, &out_data_len);
+@@ -3831,7 +3822,7 @@ static int smb3_fiemap(struct cifs_tcon *tcon,
+ 
+       rc = SMB2_ioctl(xid, tcon, cfile->fid.persistent_fid,
+                       cfile->fid.volatile_fid,
+-                      FSCTL_QUERY_ALLOCATED_RANGES, true,
++                      FSCTL_QUERY_ALLOCATED_RANGES,
+                       (char *)&in_data, sizeof(in_data),
+                       1024 * sizeof(struct file_allocated_range_buffer),
+                       (char **)&out_data, &out_data_len);
+diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c
+index 24dd711fa9b95..7ee8abd1f79be 100644
+--- a/fs/cifs/smb2pdu.c
++++ b/fs/cifs/smb2pdu.c
+@@ -1081,7 +1081,7 @@ int smb3_validate_negotiate(const unsigned int xid, 
struct cifs_tcon *tcon)
+       }
+ 
+       rc = SMB2_ioctl(xid, tcon, NO_FILE_ID, NO_FILE_ID,
+-              FSCTL_VALIDATE_NEGOTIATE_INFO, true /* is_fsctl */,
++              FSCTL_VALIDATE_NEGOTIATE_INFO,
+               (char *)pneg_inbuf, inbuflen, CIFSMaxBufSize,
+               (char **)&pneg_rsp, &rsplen);
+       if (rc == -EOPNOTSUPP) {
+@@ -2922,7 +2922,7 @@ int
+ SMB2_ioctl_init(struct cifs_tcon *tcon, struct TCP_Server_Info *server,
+               struct smb_rqst *rqst,
+               u64 persistent_fid, u64 volatile_fid, u32 opcode,
+-              bool is_fsctl, char *in_data, u32 indatalen,
++              char *in_data, u32 indatalen,
+               __u32 max_response_size)
+ {
+       struct smb2_ioctl_req *req;
+@@ -2997,10 +2997,8 @@ SMB2_ioctl_init(struct cifs_tcon *tcon, struct 
TCP_Server_Info *server,
+       req->sync_hdr.CreditCharge =
+               cpu_to_le16(DIV_ROUND_UP(max(indatalen, max_response_size),
+                                        SMB2_MAX_BUFFER_SIZE));
+-      if (is_fsctl)
+-              req->Flags = cpu_to_le32(SMB2_0_IOCTL_IS_FSCTL);
+-      else
+-              req->Flags = 0;
++      /* always an FSCTL (for now) */
++      req->Flags = cpu_to_le32(SMB2_0_IOCTL_IS_FSCTL);
+ 
+       /* validate negotiate request must be signed - see MS-SMB2 3.2.5.5 */
+       if (opcode == FSCTL_VALIDATE_NEGOTIATE_INFO)
+@@ -3027,9 +3025,9 @@ SMB2_ioctl_free(struct smb_rqst *rqst)
+  */
+ int
+ SMB2_ioctl(const unsigned int xid, struct cifs_tcon *tcon, u64 persistent_fid,
+-         u64 volatile_fid, u32 opcode, bool is_fsctl,
+-         char *in_data, u32 indatalen, u32 max_out_data_len,
+-         char **out_data, u32 *plen /* returned data len */)
++         u64 volatile_fid, u32 opcode, char *in_data, u32 indatalen,
++         u32 max_out_data_len, char **out_data,
++         u32 *plen /* returned data len */)
+ {
+       struct smb_rqst rqst;
+       struct smb2_ioctl_rsp *rsp = NULL;
+@@ -3071,7 +3069,7 @@ SMB2_ioctl(const unsigned int xid, struct cifs_tcon 
*tcon, u64 persistent_fid,
+ 
+       rc = SMB2_ioctl_init(tcon, server,
+                            &rqst, persistent_fid, volatile_fid, opcode,
+-                           is_fsctl, in_data, indatalen, max_out_data_len);
++                           in_data, indatalen, max_out_data_len);
+       if (rc)
+               goto ioctl_exit;
+ 
+@@ -3153,7 +3151,7 @@ SMB2_set_compression(const unsigned int xid, struct 
cifs_tcon *tcon,
+                       cpu_to_le16(COMPRESSION_FORMAT_DEFAULT);
+ 
+       rc = SMB2_ioctl(xid, tcon, persistent_fid, volatile_fid,
+-                      FSCTL_SET_COMPRESSION, true /* is_fsctl */,
++                      FSCTL_SET_COMPRESSION,
+                       (char *)&fsctl_input /* data input */,
+                       2 /* in data len */, CIFSMaxBufSize /* max out data */,
+                       &ret_data /* out data */, NULL);
+diff --git a/fs/cifs/smb2proto.h b/fs/cifs/smb2proto.h
+index 4eb0ca84355a6..ed2b4fb012a41 100644
+--- a/fs/cifs/smb2proto.h
++++ b/fs/cifs/smb2proto.h
+@@ -155,13 +155,13 @@ extern int SMB2_open_init(struct cifs_tcon *tcon,
+ extern void SMB2_open_free(struct smb_rqst *rqst);
+ extern int SMB2_ioctl(const unsigned int xid, struct cifs_tcon *tcon,
+                    u64 persistent_fid, u64 volatile_fid, u32 opcode,
+-                   bool is_fsctl, char *in_data, u32 indatalen, u32 maxoutlen,
++                   char *in_data, u32 indatalen, u32 maxoutlen,
+                    char **out_data, u32 *plen /* returned data len */);
+ extern int SMB2_ioctl_init(struct cifs_tcon *tcon,
+                          struct TCP_Server_Info *server,
+                          struct smb_rqst *rqst,
+                          u64 persistent_fid, u64 volatile_fid, u32 opcode,
+-                         bool is_fsctl, char *in_data, u32 indatalen,
++                         char *in_data, u32 indatalen,
+                          __u32 max_response_size);
+ extern void SMB2_ioctl_free(struct smb_rqst *rqst);
+ extern int SMB2_change_notify(const unsigned int xid, struct cifs_tcon *tcon,
+diff --git a/fs/debugfs/inode.c b/fs/debugfs/inode.c
+index 848e0aaa8da5d..f47f0a7d2c3b9 100644
+--- a/fs/debugfs/inode.c
++++ b/fs/debugfs/inode.c
+@@ -730,6 +730,28 @@ void debugfs_remove(struct dentry *dentry)
+ }
+ EXPORT_SYMBOL_GPL(debugfs_remove);
+ 
++/**
++ * debugfs_lookup_and_remove - lookup a directory or file and recursively 
remove it
++ * @name: a pointer to a string containing the name of the item to look up.
++ * @parent: a pointer to the parent dentry of the item.
++ *
++ * This is the equivalent of doing something like
++ * debugfs_remove(debugfs_lookup(..)) but with the proper reference counting
++ * handled for the directory being looked up.
++ */
++void debugfs_lookup_and_remove(const char *name, struct dentry *parent)
++{
++      struct dentry *dentry;
++
++      dentry = debugfs_lookup(name, parent);
++      if (!dentry)
++              return;
++
++      debugfs_remove(dentry);
++      dput(dentry);
++}
++EXPORT_SYMBOL_GPL(debugfs_lookup_and_remove);
++
+ /**
+  * debugfs_rename - rename a file/directory in the debugfs filesystem
+  * @old_dir: a pointer to the parent dentry for the renamed object. This
+diff --git a/fs/nfsd/vfs.c b/fs/nfsd/vfs.c
+index c852bb5ff2121..a4ae1fcd2ab1e 100644
+--- a/fs/nfsd/vfs.c
++++ b/fs/nfsd/vfs.c
+@@ -1014,6 +1014,10 @@ nfsd_vfs_write(struct svc_rqst *rqstp, struct svc_fh 
*fhp, struct nfsd_file *nf,
+       iov_iter_kvec(&iter, WRITE, vec, vlen, *cnt);
+       since = READ_ONCE(file->f_wb_err);
+       if (flags & RWF_SYNC) {
++              if (verf)
++                      nfsd_copy_boot_verifier(verf,
++                                      net_generic(SVC_NET(rqstp),
++                                      nfsd_net_id));
+               host_err = vfs_iter_write(file, &iter, &pos, flags);
+               if (host_err < 0)
+                       nfsd_reset_boot_verifier(net_generic(SVC_NET(rqstp),
+diff --git a/include/linux/buffer_head.h b/include/linux/buffer_head.h
+index 20a2ff1c07a1b..e93e3faa82296 100644
+--- a/include/linux/buffer_head.h
++++ b/include/linux/buffer_head.h
+@@ -136,6 +136,17 @@ BUFFER_FNS(Defer_Completion, defer_completion)
+ 
+ static __always_inline void set_buffer_uptodate(struct buffer_head *bh)
+ {
++      /*
++       * If somebody else already set this uptodate, they will
++       * have done the memory barrier, and a reader will thus
++       * see *some* valid buffer state.
++       *
++       * Any other serialization (with IO errors or whatever that
++       * might clear the bit) has to come from other state (eg BH_Lock).
++       */
++      if (test_bit(BH_Uptodate, &bh->b_state))
++              return;
++
+       /*
+        * make it consistent with folio_mark_uptodate
+        * pairs with smp_load_acquire in buffer_uptodate
+diff --git a/include/linux/debugfs.h b/include/linux/debugfs.h
+index d6c4cc9ecc77c..2357109a8901b 100644
+--- a/include/linux/debugfs.h
++++ b/include/linux/debugfs.h
+@@ -91,6 +91,8 @@ struct dentry *debugfs_create_automount(const char *name,
+ void debugfs_remove(struct dentry *dentry);
+ #define debugfs_remove_recursive debugfs_remove
+ 
++void debugfs_lookup_and_remove(const char *name, struct dentry *parent);
++
+ const struct file_operations *debugfs_real_fops(const struct file *filp);
+ 
+ int debugfs_file_get(struct dentry *dentry);
+@@ -220,6 +222,10 @@ static inline void debugfs_remove(struct dentry *dentry)
+ static inline void debugfs_remove_recursive(struct dentry *dentry)
+ { }
+ 
++static inline void debugfs_lookup_and_remove(const char *name,
++                                           struct dentry *parent)
++{ }
++
+ const struct file_operations *debugfs_real_fops(const struct file *filp);
+ 
+ static inline int debugfs_file_get(struct dentry *dentry)
+diff --git a/kernel/cgroup/cgroup.c b/kernel/cgroup/cgroup.c
+index 5046c99deba86..684c16849eff3 100644
+--- a/kernel/cgroup/cgroup.c
++++ b/kernel/cgroup/cgroup.c
+@@ -2304,6 +2304,47 @@ int task_cgroup_path(struct task_struct *task, char 
*buf, size_t buflen)
+ }
+ EXPORT_SYMBOL_GPL(task_cgroup_path);
+ 
++/**
++ * cgroup_attach_lock - Lock for ->attach()
++ * @lock_threadgroup: whether to down_write cgroup_threadgroup_rwsem
++ *
++ * cgroup migration sometimes needs to stabilize threadgroups against forks 
and
++ * exits by write-locking cgroup_threadgroup_rwsem. However, some ->attach()
++ * implementations (e.g. cpuset), also need to disable CPU hotplug.
++ * Unfortunately, letting ->attach() operations acquire cpus_read_lock() can
++ * lead to deadlocks.
++ *
++ * Bringing up a CPU may involve creating and destroying tasks which requires
++ * read-locking threadgroup_rwsem, so threadgroup_rwsem nests inside
++ * cpus_read_lock(). If we call an ->attach() which acquires the cpus lock 
while
++ * write-locking threadgroup_rwsem, the locking order is reversed and we end 
up
++ * waiting for an on-going CPU hotplug operation which in turn is waiting for
++ * the threadgroup_rwsem to be released to create new tasks. For more details:
++ *
++ *   http://lkml.kernel.org/r/20220711174629.uehfmqegcwn2lqzu@wubuntu
++ *
++ * Resolve the situation by always acquiring cpus_read_lock() before 
optionally
++ * write-locking cgroup_threadgroup_rwsem. This allows ->attach() to assume 
that
++ * CPU hotplug is disabled on entry.
++ */
++static void cgroup_attach_lock(bool lock_threadgroup)
++{
++      cpus_read_lock();
++      if (lock_threadgroup)
++              percpu_down_write(&cgroup_threadgroup_rwsem);
++}
++
++/**
++ * cgroup_attach_unlock - Undo cgroup_attach_lock()
++ * @lock_threadgroup: whether to up_write cgroup_threadgroup_rwsem
++ */
++static void cgroup_attach_unlock(bool lock_threadgroup)
++{
++      if (lock_threadgroup)
++              percpu_up_write(&cgroup_threadgroup_rwsem);
++      cpus_read_unlock();
++}
++
+ /**
+  * cgroup_migrate_add_task - add a migration target task to a migration 
context
+  * @task: target task
+@@ -2780,8 +2821,7 @@ int cgroup_attach_task(struct cgroup *dst_cgrp, struct 
task_struct *leader,
+ }
+ 
+ struct task_struct *cgroup_procs_write_start(char *buf, bool threadgroup,
+-                                           bool *locked)
+-      __acquires(&cgroup_threadgroup_rwsem)
++                                           bool *threadgroup_locked)
+ {
+       struct task_struct *tsk;
+       pid_t pid;
+@@ -2798,12 +2838,8 @@ struct task_struct *cgroup_procs_write_start(char *buf, 
bool threadgroup,
+        * Therefore, we can skip the global lock.
+        */
+       lockdep_assert_held(&cgroup_mutex);
+-      if (pid || threadgroup) {
+-              percpu_down_write(&cgroup_threadgroup_rwsem);
+-              *locked = true;
+-      } else {
+-              *locked = false;
+-      }
++      *threadgroup_locked = pid || threadgroup;
++      cgroup_attach_lock(*threadgroup_locked);
+ 
+       rcu_read_lock();
+       if (pid) {
+@@ -2834,17 +2870,14 @@ struct task_struct *cgroup_procs_write_start(char 
*buf, bool threadgroup,
+       goto out_unlock_rcu;
+ 
+ out_unlock_threadgroup:
+-      if (*locked) {
+-              percpu_up_write(&cgroup_threadgroup_rwsem);
+-              *locked = false;
+-      }
++      cgroup_attach_unlock(*threadgroup_locked);
++      *threadgroup_locked = false;
+ out_unlock_rcu:
+       rcu_read_unlock();
+       return tsk;
+ }
+ 
+-void cgroup_procs_write_finish(struct task_struct *task, bool locked)
+-      __releases(&cgroup_threadgroup_rwsem)
++void cgroup_procs_write_finish(struct task_struct *task, bool 
threadgroup_locked)
+ {
+       struct cgroup_subsys *ss;
+       int ssid;
+@@ -2852,8 +2885,8 @@ void cgroup_procs_write_finish(struct task_struct *task, 
bool locked)
+       /* release reference from cgroup_procs_write_start() */
+       put_task_struct(task);
+ 
+-      if (locked)
+-              percpu_up_write(&cgroup_threadgroup_rwsem);
++      cgroup_attach_unlock(threadgroup_locked);
++
+       for_each_subsys(ss, ssid)
+               if (ss->post_attach)
+                       ss->post_attach();
+@@ -2908,12 +2941,11 @@ static int cgroup_update_dfl_csses(struct cgroup *cgrp)
+       struct cgroup_subsys_state *d_css;
+       struct cgroup *dsct;
+       struct css_set *src_cset;
++      bool has_tasks;
+       int ret;
+ 
+       lockdep_assert_held(&cgroup_mutex);
+ 
+-      percpu_down_write(&cgroup_threadgroup_rwsem);
+-
+       /* look up all csses currently attached to @cgrp's subtree */
+       spin_lock_irq(&css_set_lock);
+       cgroup_for_each_live_descendant_pre(dsct, d_css, cgrp) {
+@@ -2924,6 +2956,15 @@ static int cgroup_update_dfl_csses(struct cgroup *cgrp)
+       }
+       spin_unlock_irq(&css_set_lock);
+ 
++      /*
++       * We need to write-lock threadgroup_rwsem while migrating tasks.
++       * However, if there are no source csets for @cgrp, changing its
++       * controllers isn't gonna produce any task migrations and the
++       * write-locking can be skipped safely.
++       */
++      has_tasks = !list_empty(&mgctx.preloaded_src_csets);
++      cgroup_attach_lock(has_tasks);
++
+       /* NULL dst indicates self on default hierarchy */
+       ret = cgroup_migrate_prepare_dst(&mgctx);
+       if (ret)
+@@ -2943,7 +2984,7 @@ static int cgroup_update_dfl_csses(struct cgroup *cgrp)
+       ret = cgroup_migrate_execute(&mgctx);
+ out_finish:
+       cgroup_migrate_finish(&mgctx);
+-      percpu_up_write(&cgroup_threadgroup_rwsem);
++      cgroup_attach_unlock(has_tasks);
+       return ret;
+ }
+ 
+@@ -4799,13 +4840,13 @@ static ssize_t cgroup_procs_write(struct 
kernfs_open_file *of,
+       struct task_struct *task;
+       const struct cred *saved_cred;
+       ssize_t ret;
+-      bool locked;
++      bool threadgroup_locked;
+ 
+       dst_cgrp = cgroup_kn_lock_live(of->kn, false);
+       if (!dst_cgrp)
+               return -ENODEV;
+ 
+-      task = cgroup_procs_write_start(buf, true, &locked);
++      task = cgroup_procs_write_start(buf, true, &threadgroup_locked);
+       ret = PTR_ERR_OR_ZERO(task);
+       if (ret)
+               goto out_unlock;
+@@ -4831,7 +4872,7 @@ static ssize_t cgroup_procs_write(struct 
kernfs_open_file *of,
+       ret = cgroup_attach_task(dst_cgrp, task, true);
+ 
+ out_finish:
+-      cgroup_procs_write_finish(task, locked);
++      cgroup_procs_write_finish(task, threadgroup_locked);
+ out_unlock:
+       cgroup_kn_unlock(of->kn);
+ 
+diff --git a/kernel/cgroup/cpuset.c b/kernel/cgroup/cpuset.c
+index c51863b63f93a..b7830f1f1f3a5 100644
+--- a/kernel/cgroup/cpuset.c
++++ b/kernel/cgroup/cpuset.c
+@@ -2212,7 +2212,7 @@ static void cpuset_attach(struct cgroup_taskset *tset)
+       cgroup_taskset_first(tset, &css);
+       cs = css_cs(css);
+ 
+-      cpus_read_lock();
++      lockdep_assert_cpus_held();     /* see cgroup_attach_lock() */
+       percpu_down_write(&cpuset_rwsem);
+ 
+       /* prepare for attach */
+@@ -2268,7 +2268,6 @@ static void cpuset_attach(struct cgroup_taskset *tset)
+               wake_up(&cpuset_attach_wq);
+ 
+       percpu_up_write(&cpuset_rwsem);
+-      cpus_read_unlock();
+ }
+ 
+ /* The various types of files and directories in a cpuset file system */
+diff --git a/kernel/dma/swiotlb.c b/kernel/dma/swiotlb.c
+index 274587a57717f..4a9831d01f0ea 100644
+--- a/kernel/dma/swiotlb.c
++++ b/kernel/dma/swiotlb.c
+@@ -452,7 +452,10 @@ static void swiotlb_bounce(phys_addr_t orig_addr, 
phys_addr_t tlb_addr,
+       }
+ }
+ 
+-#define slot_addr(start, idx) ((start) + ((idx) << IO_TLB_SHIFT))
++static inline phys_addr_t slot_addr(phys_addr_t start, phys_addr_t idx)
++{
++      return start + (idx << IO_TLB_SHIFT);
++}
+ 
+ /*
+  * Return the offset into a iotlb slot required to keep the device happy.
+diff --git a/kernel/fork.c b/kernel/fork.c
+index a78c0b02edd55..b877480c901f0 100644
+--- a/kernel/fork.c
++++ b/kernel/fork.c
+@@ -1127,6 +1127,7 @@ void mmput_async(struct mm_struct *mm)
+               schedule_work(&mm->async_put_work);
+       }
+ }
++EXPORT_SYMBOL_GPL(mmput_async);
+ #endif
+ 
+ /**
+diff --git a/kernel/kprobes.c b/kernel/kprobes.c
+index a93407da0ae10..dac82a0e7c0b0 100644
+--- a/kernel/kprobes.c
++++ b/kernel/kprobes.c
+@@ -1642,6 +1642,7 @@ static int check_kprobe_address_safe(struct kprobe *p,
+       /* Ensure it is not in reserved area nor out of text */
+       if (!(core_kernel_text((unsigned long) p->addr) ||
+           is_module_text_address((unsigned long) p->addr)) ||
++          in_gate_area_no_mm((unsigned long) p->addr) ||
+           within_kprobe_blacklist((unsigned long) p->addr) ||
+           jump_label_text_reserved(p->addr, p->addr) ||
+           static_call_text_reserved(p->addr, p->addr) ||
+diff --git a/mm/kmemleak.c b/mm/kmemleak.c
+index 5bfae0686199e..4801751cb6b6d 100644
+--- a/mm/kmemleak.c
++++ b/mm/kmemleak.c
+@@ -1123,7 +1123,7 @@ EXPORT_SYMBOL(kmemleak_no_scan);
+ void __ref kmemleak_alloc_phys(phys_addr_t phys, size_t size, int min_count,
+                              gfp_t gfp)
+ {
+-      if (PHYS_PFN(phys) >= min_low_pfn && PHYS_PFN(phys) < max_low_pfn)
++      if (!IS_ENABLED(CONFIG_HIGHMEM) || PHYS_PFN(phys) < max_low_pfn)
+               kmemleak_alloc(__va(phys), size, min_count, gfp);
+ }
+ EXPORT_SYMBOL(kmemleak_alloc_phys);
+@@ -1137,7 +1137,7 @@ EXPORT_SYMBOL(kmemleak_alloc_phys);
+  */
+ void __ref kmemleak_free_part_phys(phys_addr_t phys, size_t size)
+ {
+-      if (PHYS_PFN(phys) >= min_low_pfn && PHYS_PFN(phys) < max_low_pfn)
++      if (!IS_ENABLED(CONFIG_HIGHMEM) || PHYS_PFN(phys) < max_low_pfn)
+               kmemleak_free_part(__va(phys), size);
+ }
+ EXPORT_SYMBOL(kmemleak_free_part_phys);
+@@ -1149,7 +1149,7 @@ EXPORT_SYMBOL(kmemleak_free_part_phys);
+  */
+ void __ref kmemleak_not_leak_phys(phys_addr_t phys)
+ {
+-      if (PHYS_PFN(phys) >= min_low_pfn && PHYS_PFN(phys) < max_low_pfn)
++      if (!IS_ENABLED(CONFIG_HIGHMEM) || PHYS_PFN(phys) < max_low_pfn)
+               kmemleak_not_leak(__va(phys));
+ }
+ EXPORT_SYMBOL(kmemleak_not_leak_phys);
+@@ -1161,7 +1161,7 @@ EXPORT_SYMBOL(kmemleak_not_leak_phys);
+  */
+ void __ref kmemleak_ignore_phys(phys_addr_t phys)
+ {
+-      if (PHYS_PFN(phys) >= min_low_pfn && PHYS_PFN(phys) < max_low_pfn)
++      if (!IS_ENABLED(CONFIG_HIGHMEM) || PHYS_PFN(phys) < max_low_pfn)
+               kmemleak_ignore(__va(phys));
+ }
+ EXPORT_SYMBOL(kmemleak_ignore_phys);
+diff --git a/net/bridge/br_netfilter_hooks.c b/net/bridge/br_netfilter_hooks.c
+index 10a2c7bca7199..a718204c4bfdd 100644
+--- a/net/bridge/br_netfilter_hooks.c
++++ b/net/bridge/br_netfilter_hooks.c
+@@ -384,6 +384,7 @@ static int br_nf_pre_routing_finish(struct net *net, 
struct sock *sk, struct sk_
+                               /* - Bridged-and-DNAT'ed traffic doesn't
+                                *   require ip_forwarding. */
+                               if (rt->dst.dev == dev) {
++                                      skb_dst_drop(skb);
+                                       skb_dst_set(skb, &rt->dst);
+                                       goto bridged_dnat;
+                               }
+@@ -413,6 +414,7 @@ bridged_dnat:
+                       kfree_skb(skb);
+                       return 0;
+               }
++              skb_dst_drop(skb);
+               skb_dst_set_noref(skb, &rt->dst);
+       }
+ 
+diff --git a/net/bridge/br_netfilter_ipv6.c b/net/bridge/br_netfilter_ipv6.c
+index e4e0c836c3f51..6b07f30675bb0 100644
+--- a/net/bridge/br_netfilter_ipv6.c
++++ b/net/bridge/br_netfilter_ipv6.c
+@@ -197,6 +197,7 @@ static int br_nf_pre_routing_finish_ipv6(struct net *net, 
struct sock *sk, struc
+                       kfree_skb(skb);
+                       return 0;
+               }
++              skb_dst_drop(skb);
+               skb_dst_set_noref(skb, &rt->dst);
+       }
+ 
+diff --git a/net/core/skbuff.c b/net/core/skbuff.c
+index 635cabcf8794f..7bdcdad58dc86 100644
+--- a/net/core/skbuff.c
++++ b/net/core/skbuff.c
+@@ -3986,9 +3986,8 @@ normal:
+                               SKB_GSO_CB(nskb)->csum_start =
+                                       skb_headroom(nskb) + doffset;
+                       } else {
+-                              skb_copy_bits(head_skb, offset,
+-                                            skb_put(nskb, len),
+-                                            len);
++                              if (skb_copy_bits(head_skb, offset, 
skb_put(nskb, len), len))
++                                      goto err;
+                       }
+                       continue;
+               }
+diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
+index e62500d6fe0d0..4ecd85b1e806c 100644
+--- a/net/ipv4/tcp_input.c
++++ b/net/ipv4/tcp_input.c
+@@ -2496,6 +2496,21 @@ static inline bool tcp_may_undo(const struct tcp_sock 
*tp)
+       return tp->undo_marker && (!tp->undo_retrans || tcp_packet_delayed(tp));
+ }
+ 
++static bool tcp_is_non_sack_preventing_reopen(struct sock *sk)
++{
++      struct tcp_sock *tp = tcp_sk(sk);
++
++      if (tp->snd_una == tp->high_seq && tcp_is_reno(tp)) {
++              /* Hold old state until something *above* high_seq
++               * is ACKed. For Reno it is MUST to prevent false
++               * fast retransmits (RFC2582). SACK TCP is safe. */
++              if (!tcp_any_retrans_done(sk))
++                      tp->retrans_stamp = 0;
++              return true;
++      }
++      return false;
++}
++
+ /* People celebrate: "We love our President!" */
+ static bool tcp_try_undo_recovery(struct sock *sk)
+ {
+@@ -2518,14 +2533,8 @@ static bool tcp_try_undo_recovery(struct sock *sk)
+       } else if (tp->rack.reo_wnd_persist) {
+               tp->rack.reo_wnd_persist--;
+       }
+-      if (tp->snd_una == tp->high_seq && tcp_is_reno(tp)) {
+-              /* Hold old state until something *above* high_seq
+-               * is ACKed. For Reno it is MUST to prevent false
+-               * fast retransmits (RFC2582). SACK TCP is safe. */
+-              if (!tcp_any_retrans_done(sk))
+-                      tp->retrans_stamp = 0;
++      if (tcp_is_non_sack_preventing_reopen(sk))
+               return true;
+-      }
+       tcp_set_ca_state(sk, TCP_CA_Open);
+       tp->is_sack_reneg = 0;
+       return false;
+@@ -2561,6 +2570,8 @@ static bool tcp_try_undo_loss(struct sock *sk, bool 
frto_undo)
+                       NET_INC_STATS(sock_net(sk),
+                                       LINUX_MIB_TCPSPURIOUSRTOS);
+               inet_csk(sk)->icsk_retransmits = 0;
++              if (tcp_is_non_sack_preventing_reopen(sk))
++                      return true;
+               if (frto_undo || tcp_is_sack(tp)) {
+                       tcp_set_ca_state(sk, TCP_CA_Open);
+                       tp->is_sack_reneg = 0;
+diff --git a/net/ipv6/seg6.c b/net/ipv6/seg6.c
+index d2f8138e5a73a..2278c0234c497 100644
+--- a/net/ipv6/seg6.c
++++ b/net/ipv6/seg6.c
+@@ -135,6 +135,11 @@ static int seg6_genl_sethmac(struct sk_buff *skb, struct 
genl_info *info)
+               goto out_unlock;
+       }
+ 
++      if (slen > nla_len(info->attrs[SEG6_ATTR_SECRET])) {
++              err = -EINVAL;
++              goto out_unlock;
++      }
++
+       if (hinfo) {
+               err = seg6_hmac_info_del(net, hmackeyid);
+               if (err)
+diff --git a/net/netfilter/nf_conntrack_irc.c 
b/net/netfilter/nf_conntrack_irc.c
+index e40988a2f22fb..26245419ef4a9 100644
+--- a/net/netfilter/nf_conntrack_irc.c
++++ b/net/netfilter/nf_conntrack_irc.c
+@@ -185,8 +185,9 @@ static int help(struct sk_buff *skb, unsigned int protoff,
+ 
+                       /* dcc_ip can be the internal OR external (NAT'ed) IP */
+                       tuple = &ct->tuplehash[dir].tuple;
+-                      if (tuple->src.u3.ip != dcc_ip &&
+-                          tuple->dst.u3.ip != dcc_ip) {
++                      if ((tuple->src.u3.ip != dcc_ip &&
++                           ct->tuplehash[!dir].tuple.dst.u3.ip != dcc_ip) ||
++                          dcc_port == 0) {
+                               net_warn_ratelimited("Forged DCC command from 
%pI4: %pI4:%u\n",
+                                                    &tuple->src.u3.ip,
+                                                    &dcc_ip, dcc_port);
+diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
+index 1b039476e4d6a..b8e7e1c5c08a8 100644
+--- a/net/netfilter/nf_tables_api.c
++++ b/net/netfilter/nf_tables_api.c
+@@ -1971,8 +1971,10 @@ static int nft_basechain_init(struct nft_base_chain 
*basechain, u8 family,
+       chain->flags |= NFT_CHAIN_BASE | flags;
+       basechain->policy = NF_ACCEPT;
+       if (chain->flags & NFT_CHAIN_HW_OFFLOAD &&
+-          !nft_chain_offload_support(basechain))
++          !nft_chain_offload_support(basechain)) {
++              list_splice_init(&basechain->hook_list, &hook->list);
+               return -EOPNOTSUPP;
++      }
+ 
+       flow_block_init(&basechain->flow_block);
+ 
+diff --git a/net/rxrpc/rxkad.c b/net/rxrpc/rxkad.c
+index f114dc2af5cf3..5345e8eefd33c 100644
+--- a/net/rxrpc/rxkad.c
++++ b/net/rxrpc/rxkad.c
+@@ -451,7 +451,7 @@ static int rxkad_verify_packet_2(struct rxrpc_call *call, 
struct sk_buff *skb,
+        * directly into the target buffer.
+        */
+       sg = _sg;
+-      nsg = skb_shinfo(skb)->nr_frags;
++      nsg = skb_shinfo(skb)->nr_frags + 1;
+       if (nsg <= 4) {
+               nsg = 4;
+       } else {
+diff --git a/net/sched/sch_sfb.c b/net/sched/sch_sfb.c
+index da047a37a3bf3..b2724057629f6 100644
+--- a/net/sched/sch_sfb.c
++++ b/net/sched/sch_sfb.c
+@@ -135,15 +135,15 @@ static void increment_one_qlen(u32 sfbhash, u32 slot, 
struct sfb_sched_data *q)
+       }
+ }
+ 
+-static void increment_qlen(const struct sk_buff *skb, struct sfb_sched_data 
*q)
++static void increment_qlen(const struct sfb_skb_cb *cb, struct sfb_sched_data 
*q)
+ {
+       u32 sfbhash;
+ 
+-      sfbhash = sfb_hash(skb, 0);
++      sfbhash = cb->hashes[0];
+       if (sfbhash)
+               increment_one_qlen(sfbhash, 0, q);
+ 
+-      sfbhash = sfb_hash(skb, 1);
++      sfbhash = cb->hashes[1];
+       if (sfbhash)
+               increment_one_qlen(sfbhash, 1, q);
+ }
+@@ -281,8 +281,10 @@ static int sfb_enqueue(struct sk_buff *skb, struct Qdisc 
*sch,
+ {
+ 
+       struct sfb_sched_data *q = qdisc_priv(sch);
++      unsigned int len = qdisc_pkt_len(skb);
+       struct Qdisc *child = q->qdisc;
+       struct tcf_proto *fl;
++      struct sfb_skb_cb cb;
+       int i;
+       u32 p_min = ~0;
+       u32 minqlen = ~0;
+@@ -399,11 +401,12 @@ static int sfb_enqueue(struct sk_buff *skb, struct Qdisc 
*sch,
+       }
+ 
+ enqueue:
++      memcpy(&cb, sfb_skb_cb(skb), sizeof(cb));
+       ret = qdisc_enqueue(skb, child, to_free);
+       if (likely(ret == NET_XMIT_SUCCESS)) {
+-              qdisc_qstats_backlog_inc(sch, skb);
++              sch->qstats.backlog += len;
+               sch->q.qlen++;
+-              increment_qlen(skb, q);
++              increment_qlen(&cb, q);
+       } else if (net_xmit_drop_count(ret)) {
+               q->stats.childdrop++;
+               qdisc_qstats_drop(sch);
+diff --git a/net/tipc/monitor.c b/net/tipc/monitor.c
+index a37190da5a504..1d90f39129ca0 100644
+--- a/net/tipc/monitor.c
++++ b/net/tipc/monitor.c
+@@ -130,7 +130,7 @@ static void map_set(u64 *up_map, int i, unsigned int v)
+ 
+ static int map_get(u64 up_map, int i)
+ {
+-      return (up_map & (1 << i)) >> i;
++      return (up_map & (1ULL << i)) >> i;
+ }
+ 
+ static struct tipc_peer *peer_prev(struct tipc_peer *peer)
+diff --git a/sound/drivers/aloop.c b/sound/drivers/aloop.c
+index 2c5f7e905ab8f..fb45a32d99cd9 100644
+--- a/sound/drivers/aloop.c
++++ b/sound/drivers/aloop.c
+@@ -606,17 +606,18 @@ static unsigned int loopback_jiffies_timer_pos_update
+                       cable->streams[SNDRV_PCM_STREAM_PLAYBACK];
+       struct loopback_pcm *dpcm_capt =
+                       cable->streams[SNDRV_PCM_STREAM_CAPTURE];
+-      unsigned long delta_play = 0, delta_capt = 0;
++      unsigned long delta_play = 0, delta_capt = 0, cur_jiffies;
+       unsigned int running, count1, count2;
+ 
++      cur_jiffies = jiffies;
+       running = cable->running ^ cable->pause;
+       if (running & (1 << SNDRV_PCM_STREAM_PLAYBACK)) {
+-              delta_play = jiffies - dpcm_play->last_jiffies;
++              delta_play = cur_jiffies - dpcm_play->last_jiffies;
+               dpcm_play->last_jiffies += delta_play;
+       }
+ 
+       if (running & (1 << SNDRV_PCM_STREAM_CAPTURE)) {
+-              delta_capt = jiffies - dpcm_capt->last_jiffies;
++              delta_capt = cur_jiffies - dpcm_capt->last_jiffies;
+               dpcm_capt->last_jiffies += delta_capt;
+       }
+ 
+diff --git a/sound/pci/emu10k1/emupcm.c b/sound/pci/emu10k1/emupcm.c
+index b2ddabb994381..8d2c101d66a23 100644
+--- a/sound/pci/emu10k1/emupcm.c
++++ b/sound/pci/emu10k1/emupcm.c
+@@ -123,7 +123,7 @@ static int snd_emu10k1_pcm_channel_alloc(struct 
snd_emu10k1_pcm * epcm, int voic
+       epcm->voices[0]->epcm = epcm;
+       if (voices > 1) {
+               for (i = 1; i < voices; i++) {
+-                      epcm->voices[i] = 
&epcm->emu->voices[epcm->voices[0]->number + i];
++                      epcm->voices[i] = 
&epcm->emu->voices[(epcm->voices[0]->number + i) % NUM_G];
+                       epcm->voices[i]->epcm = epcm;
+               }
+       }
+diff --git a/sound/soc/atmel/mchp-spdiftx.c b/sound/soc/atmel/mchp-spdiftx.c
+index 3bd350afb7434..0d2e3fa21519c 100644
+--- a/sound/soc/atmel/mchp-spdiftx.c
++++ b/sound/soc/atmel/mchp-spdiftx.c
+@@ -196,8 +196,7 @@ struct mchp_spdiftx_dev {
+       struct clk                              *pclk;
+       struct clk                              *gclk;
+       unsigned int                            fmt;
+-      const struct mchp_i2s_caps              *caps;
+-      int                                     gclk_enabled:1;
++      unsigned int                            gclk_enabled:1;
+ };
+ 
+ static inline int mchp_spdiftx_is_running(struct mchp_spdiftx_dev *dev)
+@@ -766,8 +765,6 @@ static const struct of_device_id mchp_spdiftx_dt_ids[] = {
+ MODULE_DEVICE_TABLE(of, mchp_spdiftx_dt_ids);
+ static int mchp_spdiftx_probe(struct platform_device *pdev)
+ {
+-      struct device_node *np = pdev->dev.of_node;
+-      const struct of_device_id *match;
+       struct mchp_spdiftx_dev *dev;
+       struct resource *mem;
+       struct regmap *regmap;
+@@ -781,11 +778,6 @@ static int mchp_spdiftx_probe(struct platform_device 
*pdev)
+       if (!dev)
+               return -ENOMEM;
+ 
+-      /* Get hardware capabilities. */
+-      match = of_match_node(mchp_spdiftx_dt_ids, np);
+-      if (match)
+-              dev->caps = match->data;
+-
+       /* Map I/O registers. */
+       base = devm_platform_get_and_ioremap_resource(pdev, 0, &mem);
+       if (IS_ERR(base))
+diff --git a/sound/usb/card.c b/sound/usb/card.c
+index a3e06a71cf356..6b172db58a310 100644
+--- a/sound/usb/card.c
++++ b/sound/usb/card.c
+@@ -667,7 +667,7 @@ static bool check_delayed_register_option(struct 
snd_usb_audio *chip, int iface)
+               if (delayed_register[i] &&
+                   sscanf(delayed_register[i], "%x:%x", &id, &inum) == 2 &&
+                   id == chip->usb_id)
+-                      return inum != iface;
++                      return iface < inum;
+       }
+ 
+       return false;
+diff --git a/sound/usb/quirks.c b/sound/usb/quirks.c
+index 6333a2ecb848a..41f5d8242478f 100644
+--- a/sound/usb/quirks.c
++++ b/sound/usb/quirks.c
+@@ -1911,7 +1911,7 @@ bool snd_usb_registration_quirk(struct snd_usb_audio 
*chip, int iface)
+ 
+       for (q = registration_quirks; q->usb_id; q++)
+               if (chip->usb_id == q->usb_id)
+-                      return iface != q->interface;
++                      return iface < q->interface;
+ 
+       /* Register as normal */
+       return false;
+diff --git a/sound/usb/stream.c b/sound/usb/stream.c
+index 2f6d39c2ba7c8..c4f4585f9b851 100644
+--- a/sound/usb/stream.c
++++ b/sound/usb/stream.c
+@@ -496,6 +496,10 @@ static int __snd_usb_add_audio_stream(struct 
snd_usb_audio *chip,
+                       return 0;
+               }
+       }
++
++      if (chip->card->registered)
++              chip->need_delayed_register = true;
++
+       /* look for an empty stream */
+       list_for_each_entry(as, &chip->pcm_list, list) {
+               if (as->fmt_type != fp->fmt_type)
+@@ -503,9 +507,6 @@ static int __snd_usb_add_audio_stream(struct snd_usb_audio 
*chip,
+               subs = &as->substream[stream];
+               if (subs->ep_num)
+                       continue;
+-              if (snd_device_get_state(chip->card, as->pcm) !=
+-                  SNDRV_DEV_BUILD)
+-                      chip->need_delayed_register = true;
+               err = snd_pcm_new_stream(as->pcm, stream, 1);
+               if (err < 0)
+                       return err;
+@@ -1106,7 +1107,7 @@ static int __snd_usb_parse_audio_interface(struct 
snd_usb_audio *chip,
+        * Dallas DS4201 workaround: It presents 5 altsettings, but the last
+        * one misses syncpipe, and does not produce any sound.
+        */
+-      if (chip->usb_id == USB_ID(0x04fa, 0x4201))
++      if (chip->usb_id == USB_ID(0x04fa, 0x4201) && num >= 4)
+               num = 4;
+ 
+       for (i = 0; i < num; i++) {
