commit:     ec4d12b78bf27d396119c1a6bad2d74b1fe151c1
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Thu Jun 10 11:58:53 2021 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Thu Jun 10 11:58:53 2021 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=ec4d12b7

Linux patch 5.4.125

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

 0000_README              |    4 +
 1124_linux-5.4.125.patch | 3019 ++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 3023 insertions(+)

diff --git a/0000_README b/0000_README
index fbcce52..ebe64c3 100644
--- a/0000_README
+++ b/0000_README
@@ -539,6 +539,10 @@ Patch:  1123_linux-5.4.124.patch
 From:   http://www.kernel.org
 Desc:   Linux 5.4.124
 
+Patch:  1124_linux-5.4.125.patch
+From:   http://www.kernel.org
+Desc:   Linux 5.4.125
+
 Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.

diff --git a/1124_linux-5.4.125.patch b/1124_linux-5.4.125.patch
new file mode 100644
index 0000000..21a9802
--- /dev/null
+++ b/1124_linux-5.4.125.patch
@@ -0,0 +1,3019 @@
+diff --git a/Documentation/core-api/xarray.rst b/Documentation/core-api/xarray.rst
+index fcedc5349ace4..2ad3c1fce5795 100644
+--- a/Documentation/core-api/xarray.rst
++++ b/Documentation/core-api/xarray.rst
+@@ -461,13 +461,15 @@ or iterations will move the index to the first index in the range.
+ Each entry will only be returned once, no matter how many indices it
+ occupies.
+ 
+-Using xas_next() or xas_prev() with a multi-index xa_state
+-is not supported.  Using either of these functions on a multi-index entry
+-will reveal sibling entries; these should be skipped over by the caller.
+-
+-Storing ``NULL`` into any index of a multi-index entry will set the entry
+-at every index to ``NULL`` and dissolve the tie.  Splitting a multi-index
+-entry into entries occupying smaller ranges is not yet supported.
++Using xas_next() or xas_prev() with a multi-index xa_state is not
++supported.  Using either of these functions on a multi-index entry will
++reveal sibling entries; these should be skipped over by the caller.
++
++Storing ``NULL`` into any index of a multi-index entry will set the
++entry at every index to ``NULL`` and dissolve the tie.  A multi-index
++entry can be split into entries occupying smaller ranges by calling
++xas_split_alloc() without the xa_lock held, followed by taking the lock
++and calling xas_split().
+ 
+ Functions and structures
+ ========================
+diff --git a/Makefile b/Makefile
+index 22668742d3d04..43e7b07eea80e 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 5
+ PATCHLEVEL = 4
+-SUBLEVEL = 124
++SUBLEVEL = 125
+ EXTRAVERSION =
+ NAME = Kleptomaniac Octopus
+ 
+diff --git a/arch/arm/boot/dts/imx6dl-yapp4-common.dtsi b/arch/arm/boot/dts/imx6dl-yapp4-common.dtsi
+index e8d800fec6379..ce4a5a8074422 100644
+--- a/arch/arm/boot/dts/imx6dl-yapp4-common.dtsi
++++ b/arch/arm/boot/dts/imx6dl-yapp4-common.dtsi
+@@ -99,9 +99,13 @@
+       phy-reset-gpios = <&gpio1 25 GPIO_ACTIVE_LOW>;
+       phy-reset-duration = <20>;
+       phy-supply = <&sw2_reg>;
+-      phy-handle = <&ethphy0>;
+       status = "okay";
+ 
++      fixed-link {
++              speed = <1000>;
++              full-duplex;
++      };
++
+       mdio {
+               #address-cells = <1>;
+               #size-cells = <0>;
+diff --git a/arch/arm/boot/dts/imx6q-dhcom-som.dtsi b/arch/arm/boot/dts/imx6q-dhcom-som.dtsi
+index 08a2e17e0539b..621894d13dcbc 100644
+--- a/arch/arm/boot/dts/imx6q-dhcom-som.dtsi
++++ b/arch/arm/boot/dts/imx6q-dhcom-som.dtsi
+@@ -408,6 +408,18 @@
+       vin-supply = <&sw1_reg>;
+ };
+ 
++&reg_pu {
++      vin-supply = <&sw1_reg>;
++};
++
++&reg_vdd1p1 {
++      vin-supply = <&sw2_reg>;
++};
++
++&reg_vdd2p5 {
++      vin-supply = <&sw2_reg>;
++};
++
+ &uart1 {
+       pinctrl-names = "default";
+       pinctrl-0 = <&pinctrl_uart1>;
+diff --git a/arch/arm/boot/dts/imx6qdl-emcon-avari.dtsi b/arch/arm/boot/dts/imx6qdl-emcon-avari.dtsi
+index 828cf3e39784a..c4e146f3341bb 100644
+--- a/arch/arm/boot/dts/imx6qdl-emcon-avari.dtsi
++++ b/arch/arm/boot/dts/imx6qdl-emcon-avari.dtsi
+@@ -126,7 +126,7 @@
+               compatible = "nxp,pca8574";
+               reg = <0x3a>;
+               gpio-controller;
+-              #gpio-cells = <1>;
++              #gpio-cells = <2>;
+       };
+ };
+ 
+diff --git a/arch/arm/boot/dts/imx7d-meerkat96.dts b/arch/arm/boot/dts/imx7d-meerkat96.dts
+index 5339210b63d0f..dd8003bd1fc09 100644
+--- a/arch/arm/boot/dts/imx7d-meerkat96.dts
++++ b/arch/arm/boot/dts/imx7d-meerkat96.dts
+@@ -193,7 +193,7 @@
+       pinctrl-names = "default";
+       pinctrl-0 = <&pinctrl_usdhc1>;
+       keep-power-in-suspend;
+-      tuning-step = <2>;
++      fsl,tuning-step = <2>;
+       vmmc-supply = <&reg_3p3v>;
+       no-1-8-v;
+       broken-cd;
+diff --git a/arch/arm/boot/dts/imx7d-pico.dtsi b/arch/arm/boot/dts/imx7d-pico.dtsi
+index 6f50ebf31a0ab..8a8df54ff5639 100644
+--- a/arch/arm/boot/dts/imx7d-pico.dtsi
++++ b/arch/arm/boot/dts/imx7d-pico.dtsi
+@@ -307,7 +307,7 @@
+       pinctrl-2 = <&pinctrl_usdhc1_200mhz>;
+       cd-gpios = <&gpio5 0 GPIO_ACTIVE_LOW>;
+       bus-width = <4>;
+-      tuning-step = <2>;
++      fsl,tuning-step = <2>;
+       vmmc-supply = <&reg_3p3v>;
+       wakeup-source;
+       no-1-8-v;
+diff --git a/arch/arm64/boot/dts/freescale/fsl-ls1028a.dtsi b/arch/arm64/boot/dts/freescale/fsl-ls1028a.dtsi
+index bd99fa68b7630..5a2a188debd1d 100644
+--- a/arch/arm64/boot/dts/freescale/fsl-ls1028a.dtsi
++++ b/arch/arm64/boot/dts/freescale/fsl-ls1028a.dtsi
+@@ -151,8 +151,8 @@
+               ddr: memory-controller@1080000 {
+                       compatible = "fsl,qoriq-memory-controller";
+                       reg = <0x0 0x1080000 0x0 0x1000>;
+-                      interrupts = <GIC_SPI 144 IRQ_TYPE_LEVEL_HIGH>;
+-                      big-endian;
++                      interrupts = <GIC_SPI 17 IRQ_TYPE_LEVEL_HIGH>;
++                      little-endian;
+               };
+ 
+               dcfg: syscon@1e00000 {
+diff --git a/arch/arm64/boot/dts/freescale/imx8mq-zii-ultra.dtsi b/arch/arm64/boot/dts/freescale/imx8mq-zii-ultra.dtsi
+index 32ce14936b013..f385b143b3086 100644
+--- a/arch/arm64/boot/dts/freescale/imx8mq-zii-ultra.dtsi
++++ b/arch/arm64/boot/dts/freescale/imx8mq-zii-ultra.dtsi
+@@ -45,8 +45,8 @@
+       reg_12p0_main: regulator-12p0-main {
+               compatible = "regulator-fixed";
+               regulator-name = "12V_MAIN";
+-              regulator-min-microvolt = <5000000>;
+-              regulator-max-microvolt = <5000000>;
++              regulator-min-microvolt = <12000000>;
++              regulator-max-microvolt = <12000000>;
+               regulator-always-on;
+       };
+ 
+diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c
+index 98a177dd1f89f..da649e90240c8 100644
+--- a/arch/arm64/kvm/sys_regs.c
++++ b/arch/arm64/kvm/sys_regs.c
+@@ -432,14 +432,14 @@ static bool trap_bvr(struct kvm_vcpu *vcpu,
+                    struct sys_reg_params *p,
+                    const struct sys_reg_desc *rd)
+ {
+-      u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_bvr[rd->reg];
++      u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_bvr[rd->CRm];
+ 
+       if (p->is_write)
+               reg_to_dbg(vcpu, p, dbg_reg);
+       else
+               dbg_to_reg(vcpu, p, dbg_reg);
+ 
+-      trace_trap_reg(__func__, rd->reg, p->is_write, *dbg_reg);
++      trace_trap_reg(__func__, rd->CRm, p->is_write, *dbg_reg);
+ 
+       return true;
+ }
+@@ -447,7 +447,7 @@ static bool trap_bvr(struct kvm_vcpu *vcpu,
+ static int set_bvr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
+               const struct kvm_one_reg *reg, void __user *uaddr)
+ {
+-      __u64 *r = &vcpu->arch.vcpu_debug_state.dbg_bvr[rd->reg];
++      __u64 *r = &vcpu->arch.vcpu_debug_state.dbg_bvr[rd->CRm];
+ 
+       if (copy_from_user(r, uaddr, KVM_REG_SIZE(reg->id)) != 0)
+               return -EFAULT;
+@@ -457,7 +457,7 @@ static int set_bvr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
+ static int get_bvr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
+       const struct kvm_one_reg *reg, void __user *uaddr)
+ {
+-      __u64 *r = &vcpu->arch.vcpu_debug_state.dbg_bvr[rd->reg];
++      __u64 *r = &vcpu->arch.vcpu_debug_state.dbg_bvr[rd->CRm];
+ 
+       if (copy_to_user(uaddr, r, KVM_REG_SIZE(reg->id)) != 0)
+               return -EFAULT;
+@@ -467,21 +467,21 @@ static int get_bvr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
+ static void reset_bvr(struct kvm_vcpu *vcpu,
+                     const struct sys_reg_desc *rd)
+ {
+-      vcpu->arch.vcpu_debug_state.dbg_bvr[rd->reg] = rd->val;
++      vcpu->arch.vcpu_debug_state.dbg_bvr[rd->CRm] = rd->val;
+ }
+ 
+ static bool trap_bcr(struct kvm_vcpu *vcpu,
+                    struct sys_reg_params *p,
+                    const struct sys_reg_desc *rd)
+ {
+-      u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_bcr[rd->reg];
++      u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_bcr[rd->CRm];
+ 
+       if (p->is_write)
+               reg_to_dbg(vcpu, p, dbg_reg);
+       else
+               dbg_to_reg(vcpu, p, dbg_reg);
+ 
+-      trace_trap_reg(__func__, rd->reg, p->is_write, *dbg_reg);
++      trace_trap_reg(__func__, rd->CRm, p->is_write, *dbg_reg);
+ 
+       return true;
+ }
+@@ -489,7 +489,7 @@ static bool trap_bcr(struct kvm_vcpu *vcpu,
+ static int set_bcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
+               const struct kvm_one_reg *reg, void __user *uaddr)
+ {
+-      __u64 *r = &vcpu->arch.vcpu_debug_state.dbg_bcr[rd->reg];
++      __u64 *r = &vcpu->arch.vcpu_debug_state.dbg_bcr[rd->CRm];
+ 
+       if (copy_from_user(r, uaddr, KVM_REG_SIZE(reg->id)) != 0)
+               return -EFAULT;
+@@ -500,7 +500,7 @@ static int set_bcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
+ static int get_bcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
+       const struct kvm_one_reg *reg, void __user *uaddr)
+ {
+-      __u64 *r = &vcpu->arch.vcpu_debug_state.dbg_bcr[rd->reg];
++      __u64 *r = &vcpu->arch.vcpu_debug_state.dbg_bcr[rd->CRm];
+ 
+       if (copy_to_user(uaddr, r, KVM_REG_SIZE(reg->id)) != 0)
+               return -EFAULT;
+@@ -510,22 +510,22 @@ static int get_bcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
+ static void reset_bcr(struct kvm_vcpu *vcpu,
+                     const struct sys_reg_desc *rd)
+ {
+-      vcpu->arch.vcpu_debug_state.dbg_bcr[rd->reg] = rd->val;
++      vcpu->arch.vcpu_debug_state.dbg_bcr[rd->CRm] = rd->val;
+ }
+ 
+ static bool trap_wvr(struct kvm_vcpu *vcpu,
+                    struct sys_reg_params *p,
+                    const struct sys_reg_desc *rd)
+ {
+-      u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_wvr[rd->reg];
++      u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_wvr[rd->CRm];
+ 
+       if (p->is_write)
+               reg_to_dbg(vcpu, p, dbg_reg);
+       else
+               dbg_to_reg(vcpu, p, dbg_reg);
+ 
+-      trace_trap_reg(__func__, rd->reg, p->is_write,
+-              vcpu->arch.vcpu_debug_state.dbg_wvr[rd->reg]);
++      trace_trap_reg(__func__, rd->CRm, p->is_write,
++              vcpu->arch.vcpu_debug_state.dbg_wvr[rd->CRm]);
+ 
+       return true;
+ }
+@@ -533,7 +533,7 @@ static bool trap_wvr(struct kvm_vcpu *vcpu,
+ static int set_wvr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
+               const struct kvm_one_reg *reg, void __user *uaddr)
+ {
+-      __u64 *r = &vcpu->arch.vcpu_debug_state.dbg_wvr[rd->reg];
++      __u64 *r = &vcpu->arch.vcpu_debug_state.dbg_wvr[rd->CRm];
+ 
+       if (copy_from_user(r, uaddr, KVM_REG_SIZE(reg->id)) != 0)
+               return -EFAULT;
+@@ -543,7 +543,7 @@ static int set_wvr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
+ static int get_wvr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
+       const struct kvm_one_reg *reg, void __user *uaddr)
+ {
+-      __u64 *r = &vcpu->arch.vcpu_debug_state.dbg_wvr[rd->reg];
++      __u64 *r = &vcpu->arch.vcpu_debug_state.dbg_wvr[rd->CRm];
+ 
+       if (copy_to_user(uaddr, r, KVM_REG_SIZE(reg->id)) != 0)
+               return -EFAULT;
+@@ -553,21 +553,21 @@ static int get_wvr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
+ static void reset_wvr(struct kvm_vcpu *vcpu,
+                     const struct sys_reg_desc *rd)
+ {
+-      vcpu->arch.vcpu_debug_state.dbg_wvr[rd->reg] = rd->val;
++      vcpu->arch.vcpu_debug_state.dbg_wvr[rd->CRm] = rd->val;
+ }
+ 
+ static bool trap_wcr(struct kvm_vcpu *vcpu,
+                    struct sys_reg_params *p,
+                    const struct sys_reg_desc *rd)
+ {
+-      u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_wcr[rd->reg];
++      u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_wcr[rd->CRm];
+ 
+       if (p->is_write)
+               reg_to_dbg(vcpu, p, dbg_reg);
+       else
+               dbg_to_reg(vcpu, p, dbg_reg);
+ 
+-      trace_trap_reg(__func__, rd->reg, p->is_write, *dbg_reg);
++      trace_trap_reg(__func__, rd->CRm, p->is_write, *dbg_reg);
+ 
+       return true;
+ }
+@@ -575,7 +575,7 @@ static bool trap_wcr(struct kvm_vcpu *vcpu,
+ static int set_wcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
+               const struct kvm_one_reg *reg, void __user *uaddr)
+ {
+-      __u64 *r = &vcpu->arch.vcpu_debug_state.dbg_wcr[rd->reg];
++      __u64 *r = &vcpu->arch.vcpu_debug_state.dbg_wcr[rd->CRm];
+ 
+       if (copy_from_user(r, uaddr, KVM_REG_SIZE(reg->id)) != 0)
+               return -EFAULT;
+@@ -585,7 +585,7 @@ static int set_wcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
+ static int get_wcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
+       const struct kvm_one_reg *reg, void __user *uaddr)
+ {
+-      __u64 *r = &vcpu->arch.vcpu_debug_state.dbg_wcr[rd->reg];
++      __u64 *r = &vcpu->arch.vcpu_debug_state.dbg_wcr[rd->CRm];
+ 
+       if (copy_to_user(uaddr, r, KVM_REG_SIZE(reg->id)) != 0)
+               return -EFAULT;
+@@ -595,7 +595,7 @@ static int get_wcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
+ static void reset_wcr(struct kvm_vcpu *vcpu,
+                     const struct sys_reg_desc *rd)
+ {
+-      vcpu->arch.vcpu_debug_state.dbg_wcr[rd->reg] = rd->val;
++      vcpu->arch.vcpu_debug_state.dbg_wcr[rd->CRm] = rd->val;
+ }
+ 
+ static void reset_amair_el1(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
+diff --git a/arch/x86/include/asm/apic.h b/arch/x86/include/asm/apic.h
+index 5bef1575708dc..a49b1aeb2147b 100644
+--- a/arch/x86/include/asm/apic.h
++++ b/arch/x86/include/asm/apic.h
+@@ -174,6 +174,7 @@ static inline int apic_is_clustered_box(void)
+ extern int setup_APIC_eilvt(u8 lvt_off, u8 vector, u8 msg_type, u8 mask);
+ extern void lapic_assign_system_vectors(void);
+ extern void lapic_assign_legacy_vector(unsigned int isairq, bool replace);
++extern void lapic_update_legacy_vectors(void);
+ extern void lapic_online(void);
+ extern void lapic_offline(void);
+ extern bool apic_needs_pit(void);
+diff --git a/arch/x86/include/asm/kvm_para.h b/arch/x86/include/asm/kvm_para.h
+index 9b4df6eaa11a6..f913f62eb6c35 100644
+--- a/arch/x86/include/asm/kvm_para.h
++++ b/arch/x86/include/asm/kvm_para.h
+@@ -6,8 +6,6 @@
+ #include <asm/alternative.h>
+ #include <uapi/asm/kvm_para.h>
+ 
+-extern void kvmclock_init(void);
+-
+ #ifdef CONFIG_KVM_GUEST
+ bool kvm_check_and_clear_guest_paused(void);
+ #else
+@@ -85,13 +83,14 @@ static inline long kvm_hypercall4(unsigned int nr, unsigned long p1,
+ }
+ 
+ #ifdef CONFIG_KVM_GUEST
++void kvmclock_init(void);
++void kvmclock_disable(void);
+ bool kvm_para_available(void);
+ unsigned int kvm_arch_para_features(void);
+ unsigned int kvm_arch_para_hints(void);
+ void kvm_async_pf_task_wait(u32 token, int interrupt_kernel);
+ void kvm_async_pf_task_wake(u32 token);
+ u32 kvm_read_and_reset_pf_reason(void);
+-extern void kvm_disable_steal_time(void);
+ void do_async_page_fault(struct pt_regs *regs, unsigned long error_code, unsigned long address);
+ 
+ #ifdef CONFIG_PARAVIRT_SPINLOCKS
+@@ -125,11 +124,6 @@ static inline u32 kvm_read_and_reset_pf_reason(void)
+ {
+       return 0;
+ }
+-
+-static inline void kvm_disable_steal_time(void)
+-{
+-      return;
+-}
+ #endif
+ 
+ #endif /* _ASM_X86_KVM_PARA_H */
+diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c
+index 7fafa859e9f25..4e4476b832be2 100644
+--- a/arch/x86/kernel/apic/apic.c
++++ b/arch/x86/kernel/apic/apic.c
+@@ -2579,6 +2579,7 @@ static void __init apic_bsp_setup(bool upmode)
+       end_local_APIC_setup();
+       irq_remap_enable_fault_handling();
+       setup_IO_APIC();
++      lapic_update_legacy_vectors();
+ }
+ 
+ #ifdef CONFIG_UP_LATE_INIT
+diff --git a/arch/x86/kernel/apic/vector.c b/arch/x86/kernel/apic/vector.c
+index bf6662d37a334..6b8b6bf6c5d1c 100644
+--- a/arch/x86/kernel/apic/vector.c
++++ b/arch/x86/kernel/apic/vector.c
+@@ -680,6 +680,26 @@ void lapic_assign_legacy_vector(unsigned int irq, bool replace)
+       irq_matrix_assign_system(vector_matrix, ISA_IRQ_VECTOR(irq), replace);
+ }
+ 
++void __init lapic_update_legacy_vectors(void)
++{
++      unsigned int i;
++
++      if (IS_ENABLED(CONFIG_X86_IO_APIC) && nr_ioapics > 0)
++              return;
++
++      /*
++       * If the IO/APIC is disabled via config, kernel command line or
++       * lack of enumeration then all legacy interrupts are routed
++       * through the PIC. Make sure that they are marked as legacy
++       * vectors. PIC_CASCADE_IRQ has already been marked in
++       * lapic_assign_system_vectors().
++       */
++      for (i = 0; i < nr_legacy_irqs(); i++) {
++              if (i != PIC_CASCADE_IR)
++                      lapic_assign_legacy_vector(i, true);
++      }
++}
++
+ void __init lapic_assign_system_vectors(void)
+ {
+       unsigned int i, vector = 0;
+diff --git a/arch/x86/kernel/kvm.c b/arch/x86/kernel/kvm.c
+index e820568ed4d5c..6ff2c7cac4c46 100644
+--- a/arch/x86/kernel/kvm.c
++++ b/arch/x86/kernel/kvm.c
+@@ -24,6 +24,7 @@
+ #include <linux/debugfs.h>
+ #include <linux/nmi.h>
+ #include <linux/swait.h>
++#include <linux/syscore_ops.h>
+ #include <asm/timer.h>
+ #include <asm/cpu.h>
+ #include <asm/traps.h>
+@@ -33,6 +34,7 @@
+ #include <asm/apicdef.h>
+ #include <asm/hypervisor.h>
+ #include <asm/tlb.h>
++#include <asm/reboot.h>
+ 
+ static int kvmapf = 1;
+ 
+@@ -351,6 +353,14 @@ static void kvm_pv_disable_apf(void)
+              smp_processor_id());
+ }
+ 
++static void kvm_disable_steal_time(void)
++{
++      if (!has_steal_clock)
++              return;
++
++      wrmsr(MSR_KVM_STEAL_TIME, 0, 0);
++}
++
+ static void kvm_pv_guest_cpu_reboot(void *unused)
+ {
+       /*
+@@ -393,14 +403,6 @@ static u64 kvm_steal_clock(int cpu)
+       return steal;
+ }
+ 
+-void kvm_disable_steal_time(void)
+-{
+-      if (!has_steal_clock)
+-              return;
+-
+-      wrmsr(MSR_KVM_STEAL_TIME, 0, 0);
+-}
+-
+ static inline void __set_percpu_decrypted(void *ptr, unsigned long size)
+ {
+       early_set_memory_decrypted((unsigned long) ptr, size);
+@@ -428,6 +430,27 @@ static void __init sev_map_percpu_data(void)
+       }
+ }
+ 
++static void kvm_guest_cpu_offline(bool shutdown)
++{
++      kvm_disable_steal_time();
++      if (kvm_para_has_feature(KVM_FEATURE_PV_EOI))
++              wrmsrl(MSR_KVM_PV_EOI_EN, 0);
++      kvm_pv_disable_apf();
++      if (!shutdown)
++              apf_task_wake_all();
++      kvmclock_disable();
++}
++
++static int kvm_cpu_online(unsigned int cpu)
++{
++      unsigned long flags;
++
++      local_irq_save(flags);
++      kvm_guest_cpu_init();
++      local_irq_restore(flags);
++      return 0;
++}
++
+ #ifdef CONFIG_SMP
+ #define KVM_IPI_CLUSTER_SIZE  (2 * BITS_PER_LONG)
+ 
+@@ -547,29 +570,46 @@ static void __init kvm_smp_prepare_boot_cpu(void)
+       kvm_spinlock_init();
+ }
+ 
+-static void kvm_guest_cpu_offline(void)
++static int kvm_cpu_down_prepare(unsigned int cpu)
+ {
+-      kvm_disable_steal_time();
+-      if (kvm_para_has_feature(KVM_FEATURE_PV_EOI))
+-              wrmsrl(MSR_KVM_PV_EOI_EN, 0);
+-      kvm_pv_disable_apf();
+-      apf_task_wake_all();
++      unsigned long flags;
++
++      local_irq_save(flags);
++      kvm_guest_cpu_offline(false);
++      local_irq_restore(flags);
++      return 0;
+ }
+ 
+-static int kvm_cpu_online(unsigned int cpu)
++#endif
++
++static int kvm_suspend(void)
+ {
+-      local_irq_disable();
+-      kvm_guest_cpu_init();
+-      local_irq_enable();
++      kvm_guest_cpu_offline(false);
++
+       return 0;
+ }
+ 
+-static int kvm_cpu_down_prepare(unsigned int cpu)
++static void kvm_resume(void)
+ {
+-      local_irq_disable();
+-      kvm_guest_cpu_offline();
+-      local_irq_enable();
+-      return 0;
++      kvm_cpu_online(raw_smp_processor_id());
++}
++
++static struct syscore_ops kvm_syscore_ops = {
++      .suspend        = kvm_suspend,
++      .resume         = kvm_resume,
++};
++
++/*
++ * After a PV feature is registered, the host will keep writing to the
++ * registered memory location. If the guest happens to shutdown, this memory
++ * won't be valid. In cases like kexec, in which you install a new kernel, this
++ * means a random memory location will be kept being written.
++ */
++#ifdef CONFIG_KEXEC_CORE
++static void kvm_crash_shutdown(struct pt_regs *regs)
++{
++      kvm_guest_cpu_offline(true);
++      native_machine_crash_shutdown(regs);
+ }
+ #endif
+ 
+@@ -649,6 +689,12 @@ static void __init kvm_guest_init(void)
+       kvm_guest_cpu_init();
+ #endif
+ 
++#ifdef CONFIG_KEXEC_CORE
++      machine_ops.crash_shutdown = kvm_crash_shutdown;
++#endif
++
++      register_syscore_ops(&kvm_syscore_ops);
++
+       /*
+        * Hard lockup detection is enabled by default. Disable it, as guests
+        * can get false positives too easily, for example if the host is
+diff --git a/arch/x86/kernel/kvmclock.c b/arch/x86/kernel/kvmclock.c
+index 904494b924c13..4a0802af2e3e0 100644
+--- a/arch/x86/kernel/kvmclock.c
++++ b/arch/x86/kernel/kvmclock.c
+@@ -20,7 +20,6 @@
+ #include <asm/hypervisor.h>
+ #include <asm/mem_encrypt.h>
+ #include <asm/x86_init.h>
+-#include <asm/reboot.h>
+ #include <asm/kvmclock.h>
+ 
+ static int kvmclock __initdata = 1;
+@@ -197,28 +196,9 @@ static void kvm_setup_secondary_clock(void)
+ }
+ #endif
+ 
+-/*
+- * After the clock is registered, the host will keep writing to the
+- * registered memory location. If the guest happens to shutdown, this memory
+- * won't be valid. In cases like kexec, in which you install a new kernel, this
+- * means a random memory location will be kept being written. So before any
+- * kind of shutdown from our side, we unregister the clock by writing anything
+- * that does not have the 'enable' bit set in the msr
+- */
+-#ifdef CONFIG_KEXEC_CORE
+-static void kvm_crash_shutdown(struct pt_regs *regs)
+-{
+-      native_write_msr(msr_kvm_system_time, 0, 0);
+-      kvm_disable_steal_time();
+-      native_machine_crash_shutdown(regs);
+-}
+-#endif
+-
+-static void kvm_shutdown(void)
++void kvmclock_disable(void)
+ {
+       native_write_msr(msr_kvm_system_time, 0, 0);
+-      kvm_disable_steal_time();
+-      native_machine_shutdown();
+ }
+ 
+ static void __init kvmclock_init_mem(void)
+@@ -346,10 +326,6 @@ void __init kvmclock_init(void)
+ #endif
+       x86_platform.save_sched_clock_state = kvm_save_sched_clock_state;
+       x86_platform.restore_sched_clock_state = kvm_restore_sched_clock_state;
+-      machine_ops.shutdown  = kvm_shutdown;
+-#ifdef CONFIG_KEXEC_CORE
+-      machine_ops.crash_shutdown  = kvm_crash_shutdown;
+-#endif
+       kvm_get_preset_lpj();
+ 
+       /*
+diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
+index b9d14fdbd2d81..074cd170912aa 100644
+--- a/arch/x86/kvm/svm.c
++++ b/arch/x86/kvm/svm.c
+@@ -4057,7 +4057,7 @@ static int cr_interception(struct vcpu_svm *svm)
+       err = 0;
+       if (cr >= 16) { /* mov to cr */
+               cr -= 16;
+-              val = kvm_register_read(&svm->vcpu, reg);
++              val = kvm_register_readl(&svm->vcpu, reg);
+               switch (cr) {
+               case 0:
+                       if (!check_selective_cr0_intercepted(svm, val))
+@@ -4102,7 +4102,7 @@ static int cr_interception(struct vcpu_svm *svm)
+                       kvm_queue_exception(&svm->vcpu, UD_VECTOR);
+                       return 1;
+               }
+-              kvm_register_write(&svm->vcpu, reg, val);
++              kvm_register_writel(&svm->vcpu, reg, val);
+       }
+       return kvm_complete_insn_gp(&svm->vcpu, err);
+ }
+@@ -4132,13 +4132,13 @@ static int dr_interception(struct vcpu_svm *svm)
+       if (dr >= 16) { /* mov to DRn */
+               if (!kvm_require_dr(&svm->vcpu, dr - 16))
+                       return 1;
+-              val = kvm_register_read(&svm->vcpu, reg);
++              val = kvm_register_readl(&svm->vcpu, reg);
+               kvm_set_dr(&svm->vcpu, dr - 16, val);
+       } else {
+               if (!kvm_require_dr(&svm->vcpu, dr))
+                       return 1;
+               kvm_get_dr(&svm->vcpu, dr, &val);
+-              kvm_register_write(&svm->vcpu, reg, val);
++              kvm_register_writel(&svm->vcpu, reg, val);
+       }
+ 
+       return kvm_skip_emulated_instruction(&svm->vcpu);
+diff --git a/drivers/acpi/acpica/utdelete.c b/drivers/acpi/acpica/utdelete.c
+index 4c0d4e4341961..72d2c0b656339 100644
+--- a/drivers/acpi/acpica/utdelete.c
++++ b/drivers/acpi/acpica/utdelete.c
+@@ -285,6 +285,14 @@ static void acpi_ut_delete_internal_obj(union acpi_operand_object *object)
+               }
+               break;
+ 
++      case ACPI_TYPE_LOCAL_ADDRESS_HANDLER:
++
++              ACPI_DEBUG_PRINT((ACPI_DB_ALLOCATIONS,
++                                "***** Address handler %p\n", object));
++
++              acpi_os_delete_mutex(object->address_space.context_mutex);
++              break;
++
+       default:
+ 
+               break;
+diff --git a/drivers/bus/ti-sysc.c b/drivers/bus/ti-sysc.c
+index d59e1ca9990b6..90053c4a8290d 100644
+--- a/drivers/bus/ti-sysc.c
++++ b/drivers/bus/ti-sysc.c
+@@ -1376,9 +1376,9 @@ static const struct sysc_revision_quirk sysc_revision_quirks[] = {
+                  SYSC_QUIRK_SWSUP_SIDLE | SYSC_QUIRK_LEGACY_IDLE),
+       /* Uarts on omap4 and later */
+       SYSC_QUIRK("uart", 0, 0x50, 0x54, 0x58, 0x50411e03, 0xffff00ff,
+-                 SYSC_QUIRK_SWSUP_SIDLE_ACT | SYSC_QUIRK_LEGACY_IDLE),
++                 SYSC_QUIRK_SWSUP_SIDLE | SYSC_QUIRK_LEGACY_IDLE),
+       SYSC_QUIRK("uart", 0, 0x50, 0x54, 0x58, 0x47422e03, 0xffffffff,
+-                 SYSC_QUIRK_SWSUP_SIDLE_ACT | SYSC_QUIRK_LEGACY_IDLE),
++                 SYSC_QUIRK_SWSUP_SIDLE | SYSC_QUIRK_LEGACY_IDLE),
+ 
+       /* Quirks that need to be set based on the module address */
+       SYSC_QUIRK("mcpdm", 0x40132000, 0, 0x10, -ENODEV, 0x50000800, 0xffffffff,
+diff --git a/drivers/firmware/efi/cper.c b/drivers/firmware/efi/cper.c
+index b1af0de2e1008..e48298687b76d 100644
+--- a/drivers/firmware/efi/cper.c
++++ b/drivers/firmware/efi/cper.c
+@@ -263,8 +263,7 @@ static int cper_dimm_err_location(struct cper_mem_err_compact *mem, char *msg)
+       if (!msg || !(mem->validation_bits & CPER_MEM_VALID_MODULE_HANDLE))
+               return 0;
+ 
+-      n = 0;
+-      len = CPER_REC_LEN - 1;
++      len = CPER_REC_LEN;
+       dmi_memdev_name(mem->mem_dev_handle, &bank, &device);
+       if (bank && device)
+               n = snprintf(msg, len, "DIMM location: %s %s ", bank, device);
+@@ -273,7 +272,6 @@ static int cper_dimm_err_location(struct cper_mem_err_compact *mem, char *msg)
+                            "DIMM location: not present. DMI handle: 0x%.4x ",
+                            mem->mem_dev_handle);
+ 
+-      msg[n] = '\0';
+       return n;
+ }
+ 
+diff --git a/drivers/firmware/efi/memattr.c b/drivers/firmware/efi/memattr.c
+index 58452fde92cc0..5d343dc8e5354 100644
+--- a/drivers/firmware/efi/memattr.c
++++ b/drivers/firmware/efi/memattr.c
+@@ -66,11 +66,6 @@ static bool entry_is_valid(const efi_memory_desc_t *in, efi_memory_desc_t *out)
+               return false;
+       }
+ 
+-      if (!(in->attribute & (EFI_MEMORY_RO | EFI_MEMORY_XP))) {
+-              pr_warn("Entry attributes invalid: RO and XP bits both cleared\n");
+-              return false;
+-      }
+-
+       if (PAGE_SIZE > EFI_PAGE_SIZE &&
+           (!PAGE_ALIGNED(in->phys_addr) ||
+            !PAGE_ALIGNED(in->num_pages << EFI_PAGE_SHIFT))) {
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
+index 2cdaf3b2a7217..39ca0718ced0c 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
+@@ -351,7 +351,6 @@ static int amdgpu_ctx_query2(struct amdgpu_device *adev,
+ {
+       struct amdgpu_ctx *ctx;
+       struct amdgpu_ctx_mgr *mgr;
+-      unsigned long ras_counter;
+ 
+       if (!fpriv)
+               return -EINVAL;
+@@ -376,21 +375,6 @@ static int amdgpu_ctx_query2(struct amdgpu_device *adev,
+       if (atomic_read(&ctx->guilty))
+               out->state.flags |= AMDGPU_CTX_QUERY2_FLAGS_GUILTY;
+ 
+-      /*query ue count*/
+-      ras_counter = amdgpu_ras_query_error_count(adev, false);
+-      /*ras counter is monotonic increasing*/
+-      if (ras_counter != ctx->ras_counter_ue) {
+-              out->state.flags |= AMDGPU_CTX_QUERY2_FLAGS_RAS_UE;
+-              ctx->ras_counter_ue = ras_counter;
+-      }
+-
+-      /*query ce count*/
+-      ras_counter = amdgpu_ras_query_error_count(adev, true);
+-      if (ras_counter != ctx->ras_counter_ce) {
+-              out->state.flags |= AMDGPU_CTX_QUERY2_FLAGS_RAS_CE;
+-              ctx->ras_counter_ce = ras_counter;
+-      }
+-
+       mutex_unlock(&mgr->lock);
+       return 0;
+ }
+diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
+index 217084d56ab8c..9deef20a02699 100644
+--- a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
+@@ -354,6 +354,7 @@ static int uvd_v6_0_enc_ring_test_ib(struct amdgpu_ring *ring, long timeout)
+ 
+ error:
+       dma_fence_put(fence);
++      amdgpu_bo_unpin(bo);
+       amdgpu_bo_unreserve(bo);
+       amdgpu_bo_unref(&bo);
+       return r;
+diff --git a/drivers/hid/hid-magicmouse.c b/drivers/hid/hid-magicmouse.c
+index abd86903875f0..fc4c074597539 100644
+--- a/drivers/hid/hid-magicmouse.c
++++ b/drivers/hid/hid-magicmouse.c
+@@ -597,7 +597,7 @@ static int magicmouse_probe(struct hid_device *hdev,
+       if (id->vendor == USB_VENDOR_ID_APPLE &&
+           id->product == USB_DEVICE_ID_APPLE_MAGICTRACKPAD2 &&
+           hdev->type != HID_TYPE_USBMOUSE)
+-              return 0;
++              return -ENODEV;
+ 
+       msc = devm_kzalloc(&hdev->dev, sizeof(*msc), GFP_KERNEL);
+       if (msc == NULL) {
+diff --git a/drivers/hid/hid-multitouch.c b/drivers/hid/hid-multitouch.c
+index d91e6679afb18..f290ba856323a 100644
+--- a/drivers/hid/hid-multitouch.c
++++ b/drivers/hid/hid-multitouch.c
+@@ -611,9 +611,13 @@ static struct mt_report_data *mt_allocate_report_data(struct mt_device *td,
+               if (!(HID_MAIN_ITEM_VARIABLE & field->flags))
+                       continue;
+ 
+-              for (n = 0; n < field->report_count; n++) {
+-                      if (field->usage[n].hid == HID_DG_CONTACTID)
+-                              rdata->is_mt_collection = true;
++              if (field->logical == HID_DG_FINGER || td->hdev->group != HID_GROUP_MULTITOUCH_WIN_8) {
++                      for (n = 0; n < field->report_count; n++) {
++                              if (field->usage[n].hid == HID_DG_CONTACTID) {
++                                      rdata->is_mt_collection = true;
++                                      break;
++                              }
++                      }
+               }
+       }
+ 
+diff --git a/drivers/hid/i2c-hid/i2c-hid-core.c b/drivers/hid/i2c-hid/i2c-hid-core.c
+index 96898983db990..6f7a3702b5fba 100644
+--- a/drivers/hid/i2c-hid/i2c-hid-core.c
++++ b/drivers/hid/i2c-hid/i2c-hid-core.c
+@@ -50,6 +50,7 @@
+ #define I2C_HID_QUIRK_BOGUS_IRQ                       BIT(4)
+ #define I2C_HID_QUIRK_RESET_ON_RESUME         BIT(5)
+ #define I2C_HID_QUIRK_BAD_INPUT_SIZE          BIT(6)
++#define I2C_HID_QUIRK_NO_WAKEUP_AFTER_RESET   BIT(7)
+ 
+ 
+ /* flags */
+@@ -185,6 +186,11 @@ static const struct i2c_hid_quirks {
+                I2C_HID_QUIRK_RESET_ON_RESUME },
+       { USB_VENDOR_ID_ITE, I2C_DEVICE_ID_ITE_LENOVO_LEGION_Y720,
+               I2C_HID_QUIRK_BAD_INPUT_SIZE },
++      /*
++       * Sending the wakeup after reset actually break ELAN touchscreen controller
++       */
++      { USB_VENDOR_ID_ELAN, HID_ANY_ID,
++               I2C_HID_QUIRK_NO_WAKEUP_AFTER_RESET },
+       { 0, 0 }
+ };
+ 
+@@ -468,7 +474,8 @@ static int i2c_hid_hwreset(struct i2c_client *client)
+       }
+ 
+       /* At least some SIS devices need this after reset */
+-      ret = i2c_hid_set_power(client, I2C_HID_PWR_ON);
++      if (!(ihid->quirks & I2C_HID_QUIRK_NO_WAKEUP_AFTER_RESET))
++              ret = i2c_hid_set_power(client, I2C_HID_PWR_ON);
+ 
+ out_unlock:
+       mutex_unlock(&ihid->reset_lock);
+@@ -1114,8 +1121,8 @@ static int i2c_hid_probe(struct i2c_client *client,
+       hid->vendor = le16_to_cpu(ihid->hdesc.wVendorID);
+       hid->product = le16_to_cpu(ihid->hdesc.wProductID);
+ 
+-      snprintf(hid->name, sizeof(hid->name), "%s %04hX:%04hX",
+-               client->name, hid->vendor, hid->product);
++      snprintf(hid->name, sizeof(hid->name), "%s %04X:%04X",
++               client->name, (u16)hid->vendor, (u16)hid->product);
+       strlcpy(hid->phys, dev_name(&client->dev), sizeof(hid->phys));
+ 
+       ihid->quirks = i2c_hid_lookup_quirk(hid->vendor, hid->product);
+diff --git a/drivers/hid/usbhid/hid-pidff.c b/drivers/hid/usbhid/hid-pidff.c
+index fddac7c72f645..07a9fe97d2e05 100644
+--- a/drivers/hid/usbhid/hid-pidff.c
++++ b/drivers/hid/usbhid/hid-pidff.c
+@@ -1292,6 +1292,7 @@ int hid_pidff_init(struct hid_device *hid)
+ 
+       if (pidff->pool[PID_DEVICE_MANAGED_POOL].value &&
+           pidff->pool[PID_DEVICE_MANAGED_POOL].value[0] == 0) {
++              error = -EPERM;
+               hid_notice(hid,
+                          "device does not support device managed pool\n");
+               goto fail;
+diff --git a/drivers/hwmon/dell-smm-hwmon.c b/drivers/hwmon/dell-smm-hwmon.c
+index 4212d022d2534..35c00420d855b 100644
+--- a/drivers/hwmon/dell-smm-hwmon.c
++++ b/drivers/hwmon/dell-smm-hwmon.c
+@@ -792,10 +792,10 @@ static struct attribute *i8k_attrs[] = {
+ static umode_t i8k_is_visible(struct kobject *kobj, struct attribute *attr,
+                             int index)
+ {
+-      if (disallow_fan_support && index >= 8)
++      if (disallow_fan_support && index >= 20)
+               return 0;
+       if (disallow_fan_type_call &&
+-          (index == 9 || index == 12 || index == 15))
++          (index == 21 || index == 25 || index == 28))
+               return 0;
+       if (index >= 0 && index <= 1 &&
+           !(i8k_hwmon_flags & I8K_HWMON_HAVE_TEMP1))
+diff --git a/drivers/i2c/busses/i2c-qcom-geni.c b/drivers/i2c/busses/i2c-qcom-geni.c
+index b56a427fb928f..c73b997899af8 100644
+--- a/drivers/i2c/busses/i2c-qcom-geni.c
++++ b/drivers/i2c/busses/i2c-qcom-geni.c
+@@ -641,6 +641,14 @@ static int geni_i2c_remove(struct platform_device *pdev)
+       return 0;
+ }
+ 
++static void geni_i2c_shutdown(struct platform_device *pdev)
++{
++      struct geni_i2c_dev *gi2c = platform_get_drvdata(pdev);
++
++      /* Make client i2c transfers start failing */
++      i2c_mark_adapter_suspended(&gi2c->adap);
++}
++
+ static int __maybe_unused geni_i2c_runtime_suspend(struct device *dev)
+ {
+       int ret;
+@@ -677,6 +685,8 @@ static int __maybe_unused geni_i2c_suspend_noirq(struct device *dev)
+ {
+       struct geni_i2c_dev *gi2c = dev_get_drvdata(dev);
+ 
++      i2c_mark_adapter_suspended(&gi2c->adap);
++
+       if (!gi2c->suspended) {
+               geni_i2c_runtime_suspend(dev);
+               pm_runtime_disable(dev);
+@@ -686,8 +696,16 @@ static int __maybe_unused geni_i2c_suspend_noirq(struct device *dev)
+       return 0;
+ }
+ 
++static int __maybe_unused geni_i2c_resume_noirq(struct device *dev)
++{
++      struct geni_i2c_dev *gi2c = dev_get_drvdata(dev);
++
++      i2c_mark_adapter_resumed(&gi2c->adap);
++      return 0;
++}
++
+ static const struct dev_pm_ops geni_i2c_pm_ops = {
+-      SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(geni_i2c_suspend_noirq, NULL)
++      SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(geni_i2c_suspend_noirq, geni_i2c_resume_noirq)
+       SET_RUNTIME_PM_OPS(geni_i2c_runtime_suspend, geni_i2c_runtime_resume,
+                                                                       NULL)
+ };
+@@ -701,6 +719,7 @@ MODULE_DEVICE_TABLE(of, geni_i2c_dt_match);
+ static struct platform_driver geni_i2c_driver = {
+       .probe  = geni_i2c_probe,
+       .remove = geni_i2c_remove,
++      .shutdown = geni_i2c_shutdown,
+       .driver = {
+               .name = "geni_i2c",
+               .pm = &geni_i2c_pm_ops,
+diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+index 0dba28bb309a2..00ae7a9a42bfe 100644
+--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
++++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+@@ -7003,7 +7003,6 @@ static int __bnxt_hwrm_func_qcaps(struct bnxt *bp)
+ 
+               pf->fw_fid = le16_to_cpu(resp->fid);
+               pf->port_id = le16_to_cpu(resp->port_id);
+-              bp->dev->dev_port = pf->port_id;
+               memcpy(pf->mac_addr, resp->mac_address, ETH_ALEN);
+               pf->first_vf_id = le16_to_cpu(resp->first_vf_id);
+               pf->max_vfs = le16_to_cpu(resp->max_vfs);
+diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
+index 218aada8949d9..68a2fcf4c0bf5 100644
+--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c
++++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
+@@ -2233,15 +2233,20 @@ static struct sk_buff *i40e_run_xdp(struct i40e_ring *rx_ring,
+       case XDP_TX:
+               xdp_ring = rx_ring->vsi->xdp_rings[rx_ring->queue_index];
+               result = i40e_xmit_xdp_tx_ring(xdp, xdp_ring);
++              if (result == I40E_XDP_CONSUMED)
++                      goto out_failure;
+               break;
+       case XDP_REDIRECT:
+               err = xdp_do_redirect(rx_ring->netdev, xdp, xdp_prog);
+-              result = !err ? I40E_XDP_REDIR : I40E_XDP_CONSUMED;
++              if (err)
++                      goto out_failure;
++              result = I40E_XDP_REDIR;
+               break;
+       default:
+               bpf_warn_invalid_xdp_action(act);
+               /* fall through */
+       case XDP_ABORTED:
++out_failure:
+               trace_xdp_exception(rx_ring->netdev, xdp_prog, act);
+               /* fall through -- handle aborts by dropping packet */
+       case XDP_DROP:
+diff --git a/drivers/net/ethernet/intel/i40e/i40e_xsk.c b/drivers/net/ethernet/intel/i40e/i40e_xsk.c
+index c9d4534fbdf02..a9ad788c4913d 100644
+--- a/drivers/net/ethernet/intel/i40e/i40e_xsk.c
++++ b/drivers/net/ethernet/intel/i40e/i40e_xsk.c
+@@ -212,21 +212,28 @@ static int i40e_run_xdp_zc(struct i40e_ring *rx_ring, struct xdp_buff *xdp)
+ 
+       xdp->handle = xsk_umem_adjust_offset(umem, xdp->handle, offset);
+ 
++      if (likely(act == XDP_REDIRECT)) {
++              err = xdp_do_redirect(rx_ring->netdev, xdp, xdp_prog);
++              if (err)
++                      goto out_failure;
++              rcu_read_unlock();
++              return I40E_XDP_REDIR;
++      }
++
+       switch (act) {
+       case XDP_PASS:
+               break;
+       case XDP_TX:
+               xdp_ring = rx_ring->vsi->xdp_rings[rx_ring->queue_index];
+               result = i40e_xmit_xdp_tx_ring(xdp, xdp_ring);
+-              break;
+-      case XDP_REDIRECT:
+-              err = xdp_do_redirect(rx_ring->netdev, xdp, xdp_prog);
+-              result = !err ? I40E_XDP_REDIR : I40E_XDP_CONSUMED;
++              if (result == I40E_XDP_CONSUMED)
++                      goto out_failure;
+               break;
+       default:
+               bpf_warn_invalid_xdp_action(act);
+               /* fall through */
+       case XDP_ABORTED:
++out_failure:
+               trace_xdp_exception(rx_ring->netdev, xdp_prog, act);
+               /* fallthrough -- handle aborts by dropping packet */
+       case XDP_DROP:
+diff --git a/drivers/net/ethernet/intel/ice/ice_hw_autogen.h b/drivers/net/ethernet/intel/ice/ice_hw_autogen.h
+index 9138b19de87e0..f2bb83af4d9e8 100644
+--- a/drivers/net/ethernet/intel/ice/ice_hw_autogen.h
++++ b/drivers/net/ethernet/intel/ice/ice_hw_autogen.h
+@@ -34,6 +34,7 @@
+ #define PF_FW_ATQLEN_ATQOVFL_M                        BIT(29)
+ #define PF_FW_ATQLEN_ATQCRIT_M                        BIT(30)
+ #define VF_MBX_ARQLEN(_VF)                    (0x0022BC00 + ((_VF) * 4))
++#define VF_MBX_ATQLEN(_VF)                    (0x0022A800 + ((_VF) * 4))
+ #define PF_FW_ATQLEN_ATQENABLE_M              BIT(31)
+ #define PF_FW_ATQT                            0x00080400
+ #define PF_MBX_ARQBAH                         0x0022E400
+diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.c b/drivers/net/ethernet/intel/ice/ice_txrx.c
+index 33dd103035dcd..2b55efe5ed963 100644
+--- a/drivers/net/ethernet/intel/ice/ice_txrx.c
++++ b/drivers/net/ethernet/intel/ice/ice_txrx.c
+@@ -2109,6 +2109,7 @@ ice_xmit_frame_ring(struct sk_buff *skb, struct ice_ring *tx_ring)
+       struct ice_tx_offload_params offload = { 0 };
+       struct ice_vsi *vsi = tx_ring->vsi;
+       struct ice_tx_buf *first;
++      struct ethhdr *eth;
+       unsigned int count;
+       int tso, csum;
+ 
+@@ -2156,7 +2157,9 @@ ice_xmit_frame_ring(struct sk_buff *skb, struct ice_ring *tx_ring)
+               goto out_drop;
+ 
+       /* allow CONTROL frames egress from main VSI if FW LLDP disabled */
+-      if (unlikely(skb->priority == TC_PRIO_CONTROL &&
++      eth = (struct ethhdr *)skb_mac_header(skb);
++      if (unlikely((skb->priority == TC_PRIO_CONTROL ||
++                    eth->h_proto == htons(ETH_P_LLDP)) &&
+                    vsi->type == ICE_VSI_PF &&
+                    vsi->port_info->is_sw_lldp))
+               offload.cd_qw1 |= (u64)(ICE_TX_DESC_DTYPE_CTX |
+diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c
+index e92a00a617556..5e97fdca5fab2 100644
+--- a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c
++++ b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c
+@@ -384,13 +384,15 @@ static void ice_trigger_vf_reset(struct ice_vf *vf, bool is_vflr, bool is_pfr)
+        */
+       clear_bit(ICE_VF_STATE_INIT, vf->vf_states);
+ 
+-      /* VF_MBX_ARQLEN is cleared by PFR, so the driver needs to clear it
+-       * in the case of VFR. If this is done for PFR, it can mess up VF
+-       * resets because the VF driver may already have started cleanup
+-       * by the time we get here.
++      /* VF_MBX_ARQLEN and VF_MBX_ATQLEN are cleared by PFR, so the driver
++       * needs to clear them in the case of VFR/VFLR. If this is done for
++       * PFR, it can mess up VF resets because the VF driver may already
++       * have started cleanup by the time we get here.
+        */
+-      if (!is_pfr)
+-              wr32(hw, VF_MBX_ARQLEN(vf_abs_id), 0);
++      if (!is_pfr) {
++              wr32(hw, VF_MBX_ARQLEN(vf->vf_id), 0);
++              wr32(hw, VF_MBX_ATQLEN(vf->vf_id), 0);
++      }
+ 
+       /* In the case of a VFLR, the HW has already reset the VF and we
+        * just need to clean up, so don't hit the VFRTRIG register.
+diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
+index 64ec0e7c64b49..be8e6d4e376ec 100644
+--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
++++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
+@@ -1079,11 +1079,14 @@ static struct sk_buff *ixgbevf_run_xdp(struct ixgbevf_adapter *adapter,
+       case XDP_TX:
+               xdp_ring = adapter->xdp_ring[rx_ring->queue_index];
+               result = ixgbevf_xmit_xdp_ring(xdp_ring, xdp);
++              if (result == IXGBEVF_XDP_CONSUMED)
++                      goto out_failure;
+               break;
+       default:
+               bpf_warn_invalid_xdp_action(act);
+               /* fallthrough */
+       case XDP_ABORTED:
++out_failure:
+               trace_xdp_exception(rx_ring->netdev, xdp_prog, act);
+               /* fallthrough -- handle aborts by dropping packet */
+       case XDP_DROP:
+diff --git a/drivers/net/usb/cdc_ncm.c b/drivers/net/usb/cdc_ncm.c
+index cbe7f35eac982..0646bcd269682 100644
+--- a/drivers/net/usb/cdc_ncm.c
++++ b/drivers/net/usb/cdc_ncm.c
+@@ -1589,6 +1589,15 @@ cdc_ncm_speed_change(struct usbnet *dev,
+       uint32_t rx_speed = le32_to_cpu(data->DLBitRRate);
+       uint32_t tx_speed = le32_to_cpu(data->ULBitRate);
+ 
++      /* if the speed hasn't changed, don't report it.
++       * RTL8156 shipped before 2021 sends notification about every 32ms.
++       */
++      if (dev->rx_speed == rx_speed && dev->tx_speed == tx_speed)
++              return;
++
++      dev->rx_speed = rx_speed;
++      dev->tx_speed = tx_speed;
++
+       /*
+        * Currently the USB-NET API does not support reporting the actual
+        * device speed. Do print it instead.
+@@ -1629,7 +1638,8 @@ static void cdc_ncm_status(struct usbnet *dev, struct urb *urb)
+                * USB_CDC_NOTIFY_NETWORK_CONNECTION notification shall be
+                * sent by device after USB_CDC_NOTIFY_SPEED_CHANGE.
+                */
+-              usbnet_link_change(dev, !!event->wValue, 0);
++              if (netif_carrier_ok(dev->net) != !!event->wValue)
++                      usbnet_link_change(dev, !!event->wValue, 0);
+               break;
+ 
+       case USB_CDC_NOTIFY_SPEED_CHANGE:
+diff --git a/drivers/usb/dwc2/core_intr.c b/drivers/usb/dwc2/core_intr.c
+index 9da27ec22d588..44d74584c7275 100644
+--- a/drivers/usb/dwc2/core_intr.c
++++ b/drivers/usb/dwc2/core_intr.c
+@@ -712,7 +712,11 @@ static inline void dwc_handle_gpwrdn_disc_det(struct dwc2_hsotg *hsotg,
+       dwc2_writel(hsotg, gpwrdn_tmp, GPWRDN);
+ 
+       hsotg->hibernated = 0;
++
++#if IS_ENABLED(CONFIG_USB_DWC2_HOST) ||       \
++      IS_ENABLED(CONFIG_USB_DWC2_DUAL_ROLE)
+       hsotg->bus_suspended = 0;
++#endif
+ 
+       if (gpwrdn & GPWRDN_IDSTS) {
+               hsotg->op_state = OTG_STATE_B_PERIPHERAL;
+diff --git a/drivers/vfio/pci/Kconfig b/drivers/vfio/pci/Kconfig
+index 4abddbebd4b23..c691127bc805a 100644
+--- a/drivers/vfio/pci/Kconfig
++++ b/drivers/vfio/pci/Kconfig
+@@ -2,6 +2,7 @@
+ config VFIO_PCI
+       tristate "VFIO support for PCI devices"
+       depends on VFIO && PCI && EVENTFD
++      depends on MMU
+       select VFIO_VIRQFD
+       select IRQ_BYPASS_MANAGER
+       help
+diff --git a/drivers/vfio/pci/vfio_pci_config.c b/drivers/vfio/pci/vfio_pci_config.c
+index bf32997c557ff..50cd17fcf7541 100644
+--- a/drivers/vfio/pci/vfio_pci_config.c
++++ b/drivers/vfio/pci/vfio_pci_config.c
+@@ -1576,7 +1576,7 @@ static int vfio_ecap_init(struct vfio_pci_device *vdev)
+                       if (len == 0xFF) {
+                               len = vfio_ext_cap_len(vdev, ecap, epos);
+                               if (len < 0)
+-                                      return ret;
++                                      return len;
+                       }
+               }
+ 
+diff --git a/drivers/vfio/platform/vfio_platform_common.c b/drivers/vfio/platform/vfio_platform_common.c
+index 152e5188183ce..6f727034679f1 100644
+--- a/drivers/vfio/platform/vfio_platform_common.c
++++ b/drivers/vfio/platform/vfio_platform_common.c
+@@ -289,7 +289,7 @@ err_irq:
+       vfio_platform_regions_cleanup(vdev);
+ err_reg:
+       mutex_unlock(&driver_lock);
+-      module_put(THIS_MODULE);
++      module_put(vdev->parent_module);
+       return ret;
+ }
+ 
+diff --git a/drivers/xen/xen-pciback/vpci.c b/drivers/xen/xen-pciback/vpci.c
+index f6ba18191c0f9..30313084f06c1 100644
+--- a/drivers/xen/xen-pciback/vpci.c
++++ b/drivers/xen/xen-pciback/vpci.c
+@@ -69,7 +69,7 @@ static int __xen_pcibk_add_pci_dev(struct xen_pcibk_device *pdev,
+                                  struct pci_dev *dev, int devid,
+                                  publish_pci_dev_cb publish_cb)
+ {
+-      int err = 0, slot, func = -1;
++      int err = 0, slot, func = PCI_FUNC(dev->devfn);
+       struct pci_dev_entry *t, *dev_entry;
+       struct vpci_dev_data *vpci_dev = pdev->pci_dev_data;
+ 
+@@ -94,23 +94,26 @@ static int __xen_pcibk_add_pci_dev(struct xen_pcibk_device *pdev,
+ 
+       /*
+        * Keep multi-function devices together on the virtual PCI bus, except
+-       * virtual functions.
++       * that we want to keep virtual functions at func 0 on their own. They
++       * aren't multi-function devices and hence their presence at func 0
++       * may cause guests to not scan the other functions.
+        */
+-      if (!dev->is_virtfn) {
++      if (!dev->is_virtfn || func) {
+               for (slot = 0; slot < PCI_SLOT_MAX; slot++) {
+                       if (list_empty(&vpci_dev->dev_list[slot]))
+                               continue;
+ 
+                       t = list_entry(list_first(&vpci_dev->dev_list[slot]),
+                                      struct pci_dev_entry, list);
++                      if (t->dev->is_virtfn && !PCI_FUNC(t->dev->devfn))
++                              continue;
+ 
+                       if (match_slot(dev, t->dev)) {
+                               pr_info("vpci: %s: assign to virtual slot %d func %d\n",
+                                       pci_name(dev), slot,
+-                                      PCI_FUNC(dev->devfn));
++                                      func);
+                               list_add_tail(&dev_entry->list,
+                                             &vpci_dev->dev_list[slot]);
+-                              func = PCI_FUNC(dev->devfn);
+                               goto unlock;
+                       }
+               }
+@@ -123,7 +126,6 @@ static int __xen_pcibk_add_pci_dev(struct xen_pcibk_device *pdev,
+                               pci_name(dev), slot);
+                       list_add_tail(&dev_entry->list,
+                                     &vpci_dev->dev_list[slot]);
+-                      func = dev->is_virtfn ? 0 : PCI_FUNC(dev->devfn);
+                       goto unlock;
+               }
+       }
+diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
+index 6317394f02b86..832b40293907f 100644
+--- a/fs/btrfs/extent-tree.c
++++ b/fs/btrfs/extent-tree.c
+@@ -1338,16 +1338,20 @@ int btrfs_discard_extent(struct btrfs_fs_info *fs_info, u64 bytenr,
+               for (i = 0; i < bbio->num_stripes; i++, stripe++) {
+                       u64 bytes;
+                       struct request_queue *req_q;
++                      struct btrfs_device *device = stripe->dev;
+ 
+-                      if (!stripe->dev->bdev) {
++                      if (!device->bdev) {
+                               ASSERT(btrfs_test_opt(fs_info, DEGRADED));
+                               continue;
+                       }
+-                      req_q = bdev_get_queue(stripe->dev->bdev);
++                      req_q = bdev_get_queue(device->bdev);
+                       if (!blk_queue_discard(req_q))
+                               continue;
+ 
+-                      ret = btrfs_issue_discard(stripe->dev->bdev,
++                      if (!test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state))
++                              continue;
++
++                      ret = btrfs_issue_discard(device->bdev,
+                                                 stripe->physical,
+                                                 stripe->length,
+                                                 &bytes);
+@@ -1879,7 +1883,7 @@ static int cleanup_ref_head(struct btrfs_trans_handle *trans,
+       trace_run_delayed_ref_head(fs_info, head, 0);
+       btrfs_delayed_ref_unlock(head);
+       btrfs_put_delayed_ref_head(head);
+-      return 0;
++      return ret;
+ }
+ 
+ static struct btrfs_delayed_ref_head *btrfs_obtain_ref_head(
+diff --git a/fs/btrfs/file-item.c b/fs/btrfs/file-item.c
+index 2b8f29c07668b..61b82c69eed50 100644
+--- a/fs/btrfs/file-item.c
++++ b/fs/btrfs/file-item.c
+@@ -599,7 +599,7 @@ int btrfs_del_csums(struct btrfs_trans_handle *trans,
+       u64 end_byte = bytenr + len;
+       u64 csum_end;
+       struct extent_buffer *leaf;
+-      int ret;
++      int ret = 0;
+       u16 csum_size = btrfs_super_csum_size(fs_info->super_copy);
+       int blocksize_bits = fs_info->sb->s_blocksize_bits;
+ 
+@@ -618,6 +618,7 @@ int btrfs_del_csums(struct btrfs_trans_handle *trans,
+               path->leave_spinning = 1;
+               ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
+               if (ret > 0) {
++                      ret = 0;
+                       if (path->slots[0] == 0)
+                               break;
+                       path->slots[0]--;
+@@ -674,7 +675,7 @@ int btrfs_del_csums(struct btrfs_trans_handle *trans,
+                       ret = btrfs_del_items(trans, root, path,
+                                             path->slots[0], del_nr);
+                       if (ret)
+-                              goto out;
++                              break;
+                       if (key.offset == bytenr)
+                               break;
+               } else if (key.offset < bytenr && csum_end > end_byte) {
+@@ -718,8 +719,9 @@ int btrfs_del_csums(struct btrfs_trans_handle *trans,
+                       ret = btrfs_split_item(trans, root, path, &key, offset);
+                       if (ret && ret != -EAGAIN) {
+                               btrfs_abort_transaction(trans, ret);
+-                              goto out;
++                              break;
+                       }
++                      ret = 0;
+ 
+                       key.offset = end_byte - 1;
+               } else {
+@@ -729,8 +731,6 @@ int btrfs_del_csums(struct btrfs_trans_handle *trans,
+               }
+               btrfs_release_path(path);
+       }
+-      ret = 0;
+-out:
+       btrfs_free_path(path);
+       return ret;
+ }
+diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
+index 8ea9559c1919a..64dd702a5448c 100644
+--- a/fs/btrfs/inode.c
++++ b/fs/btrfs/inode.c
+@@ -3359,6 +3359,18 @@ out:
+       if (ret || truncated) {
+               u64 start, end;
+ 
++              /*
++               * If we failed to finish this ordered extent for any reason we
++               * need to make sure BTRFS_ORDERED_IOERR is set on the ordered
++               * extent, and mark the inode with the error if it wasn't
++               * already set.  Any error during writeback would have already
++               * set the mapping error, so we need to set it if we're the ones
++               * marking this ordered extent as failed.
++               */
++              if (ret && !test_and_set_bit(BTRFS_ORDERED_IOERR,
++                                           &ordered_extent->flags))
++                      mapping_set_error(ordered_extent->inode->i_mapping, -EIO);
++
+               if (truncated)
+                       start = ordered_extent->file_offset + logical_len;
+               else
+diff --git a/fs/btrfs/tree-checker.c b/fs/btrfs/tree-checker.c
+index 7d06842a3d747..368c43c6cbd08 100644
+--- a/fs/btrfs/tree-checker.c
++++ b/fs/btrfs/tree-checker.c
+@@ -1285,22 +1285,14 @@ static int check_extent_data_ref(struct extent_buffer *leaf,
+               return -EUCLEAN;
+       }
+       for (; ptr < end; ptr += sizeof(*dref)) {
+-              u64 root_objectid;
+-              u64 owner;
+               u64 offset;
+-              u64 hash;
+ 
++              /*
++               * We cannot check the extent_data_ref hash due to possible
++               * overflow from the leaf due to hash collisions.
++               */
+               dref = (struct btrfs_extent_data_ref *)ptr;
+-              root_objectid = btrfs_extent_data_ref_root(leaf, dref);
+-              owner = btrfs_extent_data_ref_objectid(leaf, dref);
+               offset = btrfs_extent_data_ref_offset(leaf, dref);
+-              hash = hash_extent_data_ref(root_objectid, owner, offset);
+-              if (hash != key->offset) {
+-                      extent_err(leaf, slot,
+-      "invalid extent data ref hash, item has 0x%016llx key has 0x%016llx",
+-                                 hash, key->offset);
+-                      return -EUCLEAN;
+-              }
+               if (!IS_ALIGNED(offset, leaf->fs_info->sectorsize)) {
+                       extent_err(leaf, slot,
+       "invalid extent data backref offset, have %llu expect aligned to %u",
+diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c
+index 54647eb9c6ed2..4ff381c23cefc 100644
+--- a/fs/btrfs/tree-log.c
++++ b/fs/btrfs/tree-log.c
+@@ -1775,6 +1775,7 @@ static noinline int fixup_inode_link_counts(struct btrfs_trans_handle *trans,
+                       break;
+ 
+               if (ret == 1) {
++                      ret = 0;
+                       if (path->slots[0] == 0)
+                               break;
+                       path->slots[0]--;
+@@ -1787,17 +1788,19 @@ static noinline int fixup_inode_link_counts(struct btrfs_trans_handle *trans,
+ 
+               ret = btrfs_del_item(trans, root, path);
+               if (ret)
+-                      goto out;
++                      break;
+ 
+               btrfs_release_path(path);
+               inode = read_one_inode(root, key.offset);
+-              if (!inode)
+-                      return -EIO;
++              if (!inode) {
++                      ret = -EIO;
++                      break;
++              }
+ 
+               ret = fixup_inode_link_count(trans, root, inode);
+               iput(inode);
+               if (ret)
+-                      goto out;
++                      break;
+ 
+               /*
+                * fixup on a directory may create new entries,
+@@ -1806,8 +1809,6 @@ static noinline int fixup_inode_link_counts(struct btrfs_trans_handle *trans,
+                */
+               key.offset = (u64)-1;
+       }
+-      ret = 0;
+-out:
+       btrfs_release_path(path);
+       return ret;
+ }
+diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c
+index 3193f0b4a02d6..dbd0d7a101541 100644
+--- a/fs/ext4/extents.c
++++ b/fs/ext4/extents.c
+@@ -3378,7 +3378,10 @@ static int ext4_split_extent_at(handle_t *handle,
+               ext4_ext_mark_unwritten(ex2);
+ 
+       err = ext4_ext_insert_extent(handle, inode, ppath, &newex, flags);
+-      if (err == -ENOSPC && (EXT4_EXT_MAY_ZEROOUT & split_flag)) {
++      if (err != -ENOSPC && err != -EDQUOT)
++              goto out;
++
++      if (EXT4_EXT_MAY_ZEROOUT & split_flag) {
+               if (split_flag & (EXT4_EXT_DATA_VALID1|EXT4_EXT_DATA_VALID2)) {
+                       if (split_flag & EXT4_EXT_DATA_VALID1) {
+                               err = ext4_ext_zeroout(inode, ex2);
+@@ -3404,30 +3407,30 @@ static int ext4_split_extent_at(handle_t *handle,
+                                             ext4_ext_pblock(&orig_ex));
+               }
+ 
+-              if (err)
+-                      goto fix_extent_len;
+-              /* update the extent length and mark as initialized */
+-              ex->ee_len = cpu_to_le16(ee_len);
+-              ext4_ext_try_to_merge(handle, inode, path, ex);
+-              err = ext4_ext_dirty(handle, inode, path + path->p_depth);
+-              if (err)
+-                      goto fix_extent_len;
+-
+-              /* update extent status tree */
+-              err = ext4_zeroout_es(inode, &zero_ex);
+-
+-              goto out;
+-      } else if (err)
+-              goto fix_extent_len;
+-
+-out:
+-      ext4_ext_show_leaf(inode, path);
+-      return err;
++              if (!err) {
++                      /* update the extent length and mark as initialized */
++                      ex->ee_len = cpu_to_le16(ee_len);
++                      ext4_ext_try_to_merge(handle, inode, path, ex);
++                      err = ext4_ext_dirty(handle, inode, path + path->p_depth);
++                      if (!err)
++                              /* update extent status tree */
++                              err = ext4_zeroout_es(inode, &zero_ex);
++                      /* If we failed at this point, we don't know in which
++                       * state the extent tree exactly is so don't try to fix
++                       * length of the original extent as it may do even more
++                       * damage.
++                       */
++                      goto out;
++              }
++      }
+ 
+ fix_extent_len:
+       ex->ee_len = orig_ex.ee_len;
+       ext4_ext_dirty(handle, inode, path + path->p_depth);
+       return err;
++out:
++      ext4_ext_show_leaf(inode, path);
++      return err;
+ }
+ 
+ /*
+diff --git a/fs/ocfs2/file.c b/fs/ocfs2/file.c
+index ab2b0d74ad03e..c2852d7cc14d4 100644
+--- a/fs/ocfs2/file.c
++++ b/fs/ocfs2/file.c
+@@ -1855,6 +1855,45 @@ out:
+       return ret;
+ }
+ 
++/*
++ * zero out partial blocks of one cluster.
++ *
++ * start: file offset where zero starts, will be made upper block aligned.
++ * len: it will be trimmed to the end of current cluster if "start + len"
++ *      is bigger than it.
++ */
++static int ocfs2_zeroout_partial_cluster(struct inode *inode,
++                                      u64 start, u64 len)
++{
++      int ret;
++      u64 start_block, end_block, nr_blocks;
++      u64 p_block, offset;
++      u32 cluster, p_cluster, nr_clusters;
++      struct super_block *sb = inode->i_sb;
++      u64 end = ocfs2_align_bytes_to_clusters(sb, start);
++
++      if (start + len < end)
++              end = start + len;
++
++      start_block = ocfs2_blocks_for_bytes(sb, start);
++      end_block = ocfs2_blocks_for_bytes(sb, end);
++      nr_blocks = end_block - start_block;
++      if (!nr_blocks)
++              return 0;
++
++      cluster = ocfs2_bytes_to_clusters(sb, start);
++      ret = ocfs2_get_clusters(inode, cluster, &p_cluster,
++                              &nr_clusters, NULL);
++      if (ret)
++              return ret;
++      if (!p_cluster)
++              return 0;
++
++      offset = start_block - ocfs2_clusters_to_blocks(sb, cluster);
++      p_block = ocfs2_clusters_to_blocks(sb, p_cluster) + offset;
++      return sb_issue_zeroout(sb, p_block, nr_blocks, GFP_NOFS);
++}
++
+ /*
+  * Parts of this function taken from xfs_change_file_space()
+  */
+@@ -1865,7 +1904,7 @@ static int __ocfs2_change_file_space(struct file *file, struct inode *inode,
+ {
+       int ret;
+       s64 llen;
+-      loff_t size;
++      loff_t size, orig_isize;
+       struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
+       struct buffer_head *di_bh = NULL;
+       handle_t *handle;
+@@ -1896,6 +1935,7 @@ static int __ocfs2_change_file_space(struct file *file, struct inode *inode,
+               goto out_inode_unlock;
+       }
+ 
++      orig_isize = i_size_read(inode);
+       switch (sr->l_whence) {
+       case 0: /*SEEK_SET*/
+               break;
+@@ -1903,7 +1943,7 @@ static int __ocfs2_change_file_space(struct file *file, struct inode *inode,
+               sr->l_start += f_pos;
+               break;
+       case 2: /*SEEK_END*/
+-              sr->l_start += i_size_read(inode);
++              sr->l_start += orig_isize;
+               break;
+       default:
+               ret = -EINVAL;
+@@ -1957,6 +1997,14 @@ static int __ocfs2_change_file_space(struct file *file, struct inode *inode,
+       default:
+               ret = -EINVAL;
+       }
++
++      /* zeroout eof blocks in the cluster. */
++      if (!ret && change_size && orig_isize < size) {
++              ret = ocfs2_zeroout_partial_cluster(inode, orig_isize,
++                                      size - orig_isize);
++              if (!ret)
++                      i_size_write(inode, size);
++      }
+       up_write(&OCFS2_I(inode)->ip_alloc_sem);
+       if (ret) {
+               mlog_errno(ret);
+@@ -1973,9 +2021,6 @@ static int __ocfs2_change_file_space(struct file *file, struct inode *inode,
+               goto out_inode_unlock;
+       }
+ 
+-      if (change_size && i_size_read(inode) < size)
+-              i_size_write(inode, size);
+-
+       inode->i_ctime = inode->i_mtime = current_time(inode);
+       ret = ocfs2_mark_inode_dirty(handle, inode, di_bh);
+       if (ret < 0)
+diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h
+index 93d5cf0bc7168..d8b86fd391134 100644
+--- a/include/linux/huge_mm.h
++++ b/include/linux/huge_mm.h
+@@ -231,6 +231,19 @@ static inline spinlock_t *pud_trans_huge_lock(pud_t *pud,
+       else
+               return NULL;
+ }
++
++/**
++ * thp_order - Order of a transparent huge page.
++ * @page: Head page of a transparent huge page.
++ */
++static inline unsigned int thp_order(struct page *page)
++{
++      VM_BUG_ON_PGFLAGS(PageTail(page), page);
++      if (PageHead(page))
++              return HPAGE_PMD_ORDER;
++      return 0;
++}
++
+ static inline int hpage_nr_pages(struct page *page)
+ {
+       if (unlikely(PageTransHuge(page)))
+@@ -290,6 +303,12 @@ static inline struct list_head *page_deferred_list(struct page *page)
+ #define HPAGE_PUD_MASK ({ BUILD_BUG(); 0; })
+ #define HPAGE_PUD_SIZE ({ BUILD_BUG(); 0; })
+ 
++static inline unsigned int thp_order(struct page *page)
++{
++      VM_BUG_ON_PGFLAGS(PageTail(page), page);
++      return 0;
++}
++
+ #define hpage_nr_pages(x) 1
+ 
+ static inline bool __transparent_hugepage_enabled(struct vm_area_struct *vma)
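The thp_order() helper added above reports the allocation order of a transparent huge page's head page (HPAGE_PMD_ORDER when THP is enabled, 0 otherwise). As a rough illustration of what that order means, the standalone sketch below converts an order into a page count and byte size; the shift values are assumptions for a typical x86-64 build (4 KiB base pages, 2 MiB PMD mappings), not something taken from this patch.

  #include <stdio.h>

  int main(void)
  {
      /* Assumed values for a common x86-64 configuration. */
      const unsigned int page_shift = 12;                 /* 4 KiB base pages  */
      const unsigned int pmd_shift  = 21;                 /* 2 MiB PMD mapping */
      const unsigned int order = pmd_shift - page_shift;  /* 9 */

      unsigned long nr_pages = 1UL << order;
      unsigned long bytes = nr_pages << page_shift;

      /* An order-9 THP covers 512 base pages, i.e. 2 MiB. */
      printf("order %u -> %lu pages, %lu bytes\n", order, nr_pages, bytes);
      return 0;
  }
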
+diff --git a/include/linux/usb/usbnet.h b/include/linux/usb/usbnet.h
+index d8860f2d0976d..fc6ed1311589c 100644
+--- a/include/linux/usb/usbnet.h
++++ b/include/linux/usb/usbnet.h
+@@ -83,6 +83,8 @@ struct usbnet {
+ #             define EVENT_LINK_CHANGE        11
+ #             define EVENT_SET_RX_MODE        12
+ #             define EVENT_NO_IP_ALIGN        13
++      u32                     rx_speed;       /* in bps - NOT Mbps */
++      u32                     tx_speed;       /* in bps - NOT Mbps */
+ };
+ 
+ static inline struct usb_driver *driver_of(struct usb_interface *intf)
+diff --git a/include/linux/xarray.h b/include/linux/xarray.h
+index 3b257c97837db..2903f25bff5e4 100644
+--- a/include/linux/xarray.h
++++ b/include/linux/xarray.h
+@@ -1470,6 +1470,28 @@ void xas_pause(struct xa_state *);
+ 
+ void xas_create_range(struct xa_state *);
+ 
++#ifdef CONFIG_XARRAY_MULTI
++int xa_get_order(struct xarray *, unsigned long index);
++void xas_split(struct xa_state *, void *entry, unsigned int order);
++void xas_split_alloc(struct xa_state *, void *entry, unsigned int order, gfp_t);
++#else
++static inline int xa_get_order(struct xarray *xa, unsigned long index)
++{
++      return 0;
++}
++
++static inline void xas_split(struct xa_state *xas, void *entry,
++              unsigned int order)
++{
++      xas_store(xas, entry);
++}
++
++static inline void xas_split_alloc(struct xa_state *xas, void *entry,
++              unsigned int order, gfp_t gfp)
++{
++}
++#endif
++
+ /**
+  * xas_reload() - Refetch an entry from the xarray.
+  * @xas: XArray operation state.
+diff --git a/include/net/caif/caif_dev.h b/include/net/caif/caif_dev.h
+index 48ecca8530ffa..b655d8666f555 100644
+--- a/include/net/caif/caif_dev.h
++++ b/include/net/caif/caif_dev.h
+@@ -119,7 +119,7 @@ void caif_free_client(struct cflayer *adap_layer);
+  * The link_support layer is used to add any Link Layer specific
+  * framing.
+  */
+-void caif_enroll_dev(struct net_device *dev, struct caif_dev_common *caifdev,
++int caif_enroll_dev(struct net_device *dev, struct caif_dev_common *caifdev,
+                       struct cflayer *link_support, int head_room,
+                       struct cflayer **layer, int (**rcv_func)(
+                               struct sk_buff *, struct net_device *,
+diff --git a/include/net/caif/cfcnfg.h b/include/net/caif/cfcnfg.h
+index 2aa5e91d84576..8819ff4db35a6 100644
+--- a/include/net/caif/cfcnfg.h
++++ b/include/net/caif/cfcnfg.h
+@@ -62,7 +62,7 @@ void cfcnfg_remove(struct cfcnfg *cfg);
+  * @fcs:      Specify if checksum is used in CAIF Framing Layer.
+  * @head_room:        Head space needed by link specific protocol.
+  */
+-void
++int
+ cfcnfg_add_phy_layer(struct cfcnfg *cnfg,
+                    struct net_device *dev, struct cflayer *phy_layer,
+                    enum cfcnfg_phy_preference pref,
+diff --git a/include/net/caif/cfserl.h b/include/net/caif/cfserl.h
+index 14a55e03bb3ce..67cce8757175a 100644
+--- a/include/net/caif/cfserl.h
++++ b/include/net/caif/cfserl.h
+@@ -9,4 +9,5 @@
+ #include <net/caif/caif_layer.h>
+ 
+ struct cflayer *cfserl_create(int instance, bool use_stx);
++void cfserl_release(struct cflayer *layer);
+ #endif
+diff --git a/init/main.c b/init/main.c
+index fef9e610b74b7..e6a1fb14f3085 100644
+--- a/init/main.c
++++ b/init/main.c
+@@ -1174,7 +1174,7 @@ static noinline void __init kernel_init_freeable(void)
+        */
+       set_mems_allowed(node_states[N_MEMORY]);
+ 
+-      cad_pid = task_pid(current);
++      cad_pid = get_pid(task_pid(current));
+ 
+       smp_prepare_cpus(setup_max_cpus);
+ 
+diff --git a/lib/lz4/lz4_decompress.c b/lib/lz4/lz4_decompress.c
+index 0c9d3ad17e0fc..4d0b59fa5550f 100644
+--- a/lib/lz4/lz4_decompress.c
++++ b/lib/lz4/lz4_decompress.c
+@@ -260,7 +260,11 @@ static FORCE_INLINE int LZ4_decompress_generic(
+                               }
+                       }
+ 
+-                      memcpy(op, ip, length);
++                      /*
++                       * supports overlapping memory regions; only matters
++                       * for in-place decompression scenarios
++                       */
++                      LZ4_memmove(op, ip, length);
+                       ip += length;
+                       op += length;
+ 
+diff --git a/lib/lz4/lz4defs.h b/lib/lz4/lz4defs.h
+index 1a7fa9d9170fb..369eb181d730c 100644
+--- a/lib/lz4/lz4defs.h
++++ b/lib/lz4/lz4defs.h
+@@ -137,6 +137,8 @@ static FORCE_INLINE void LZ4_writeLE16(void *memPtr, U16 value)
+       return put_unaligned_le16(value, memPtr);
+ }
+ 
++#define LZ4_memmove(dst, src, size) __builtin_memmove(dst, src, size)
++
+ static FORCE_INLINE void LZ4_copy8(void *dst, const void *src)
+ {
+ #if LZ4_ARCH64
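The switch from memcpy() to LZ4_memmove() above matters when the decompressor writes into a buffer that overlaps its input, as in in-place decompression. A minimal userspace sketch of why overlap needs memmove semantics (the buffer contents are illustrative only, not LZ4 data):

  #include <stdio.h>
  #include <string.h>

  int main(void)
  {
      /* One buffer acting as both input and output, as in in-place
       * decompression: the run starting at offset 2 is copied to
       * offset 0, so source and destination overlap.
       */
      char buf[16] = "..ABCDEFG";

      /* memcpy() has undefined behaviour for overlapping regions;
       * memmove() is specified to handle them correctly.
       */
      memmove(buf, buf + 2, strlen(buf + 2) + 1);

      printf("%s\n", buf);   /* prints "ABCDEFG" */
      return 0;
  }
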
+diff --git a/lib/test_xarray.c b/lib/test_xarray.c
+index d4f97925dbd8d..8262c3f05a5d3 100644
+--- a/lib/test_xarray.c
++++ b/lib/test_xarray.c
+@@ -1503,6 +1503,49 @@ static noinline void check_store_range(struct xarray *xa)
+       }
+ }
+ 
++#ifdef CONFIG_XARRAY_MULTI
++static void check_split_1(struct xarray *xa, unsigned long index,
++                                                      unsigned int order)
++{
++      XA_STATE(xas, xa, index);
++      void *entry;
++      unsigned int i = 0;
++
++      xa_store_order(xa, index, order, xa, GFP_KERNEL);
++
++      xas_split_alloc(&xas, xa, order, GFP_KERNEL);
++      xas_lock(&xas);
++      xas_split(&xas, xa, order);
++      xas_unlock(&xas);
++
++      xa_for_each(xa, index, entry) {
++              XA_BUG_ON(xa, entry != xa);
++              i++;
++      }
++      XA_BUG_ON(xa, i != 1 << order);
++
++      xa_set_mark(xa, index, XA_MARK_0);
++      XA_BUG_ON(xa, !xa_get_mark(xa, index, XA_MARK_0));
++
++      xa_destroy(xa);
++}
++
++static noinline void check_split(struct xarray *xa)
++{
++      unsigned int order;
++
++      XA_BUG_ON(xa, !xa_empty(xa));
++
++      for (order = 1; order < 2 * XA_CHUNK_SHIFT; order++) {
++              check_split_1(xa, 0, order);
++              check_split_1(xa, 1UL << order, order);
++              check_split_1(xa, 3UL << order, order);
++      }
++}
++#else
++static void check_split(struct xarray *xa) { }
++#endif
++
+ static void check_align_1(struct xarray *xa, char *name)
+ {
+       int i;
+@@ -1649,6 +1692,26 @@ static noinline void check_account(struct xarray *xa)
+ #endif
+ }
+ 
++static noinline void check_get_order(struct xarray *xa)
++{
++      unsigned int max_order = IS_ENABLED(CONFIG_XARRAY_MULTI) ? 20 : 1;
++      unsigned int order;
++      unsigned long i, j;
++
++      for (i = 0; i < 3; i++)
++              XA_BUG_ON(xa, xa_get_order(xa, i) != 0);
++
++      for (order = 0; order < max_order; order++) {
++              for (i = 0; i < 10; i++) {
++                      xa_store_order(xa, i << order, order,
++                                      xa_mk_index(i << order), GFP_KERNEL);
++                      for (j = i << order; j < (i + 1) << order; j++)
++                              XA_BUG_ON(xa, xa_get_order(xa, j) != order);
++                      xa_erase(xa, i << order);
++              }
++      }
++}
++
+ static noinline void check_destroy(struct xarray *xa)
+ {
+       unsigned long index;
+@@ -1697,6 +1760,7 @@ static int xarray_checks(void)
+       check_reserve(&array);
+       check_reserve(&xa0);
+       check_multi_store(&array);
++      check_get_order(&array);
+       check_xa_alloc();
+       check_find(&array);
+       check_find_entry(&array);
+@@ -1708,6 +1772,7 @@ static int xarray_checks(void)
+       check_store_range(&array);
+       check_store_iter(&array);
+       check_align(&xa0);
++      check_split(&array);
+ 
+       check_workingset(&array, 0);
+       check_workingset(&array, 64);
+diff --git a/lib/xarray.c b/lib/xarray.c
+index 08d71c7b75990..7d22b30591275 100644
+--- a/lib/xarray.c
++++ b/lib/xarray.c
+@@ -266,13 +266,14 @@ static void xa_node_free(struct xa_node *node)
+  */
+ static void xas_destroy(struct xa_state *xas)
+ {
+-      struct xa_node *node = xas->xa_alloc;
++      struct xa_node *next, *node = xas->xa_alloc;
+ 
+-      if (!node)
+-              return;
+-      XA_NODE_BUG_ON(node, !list_empty(&node->private_list));
+-      kmem_cache_free(radix_tree_node_cachep, node);
+-      xas->xa_alloc = NULL;
++      while (node) {
++              XA_NODE_BUG_ON(node, !list_empty(&node->private_list));
++              next = rcu_dereference_raw(node->parent);
++              radix_tree_node_rcu_free(&node->rcu_head);
++              xas->xa_alloc = node = next;
++      }
+ }
+ 
+ /**
+@@ -304,6 +305,7 @@ bool xas_nomem(struct xa_state *xas, gfp_t gfp)
+       xas->xa_alloc = kmem_cache_alloc(radix_tree_node_cachep, gfp);
+       if (!xas->xa_alloc)
+               return false;
++      xas->xa_alloc->parent = NULL;
+       XA_NODE_BUG_ON(xas->xa_alloc, !list_empty(&xas->xa_alloc->private_list));
+       xas->xa_node = XAS_RESTART;
+       return true;
+@@ -339,6 +341,7 @@ static bool __xas_nomem(struct xa_state *xas, gfp_t gfp)
+       }
+       if (!xas->xa_alloc)
+               return false;
++      xas->xa_alloc->parent = NULL;
+       XA_NODE_BUG_ON(xas->xa_alloc, !list_empty(&xas->xa_alloc->private_list));
+       xas->xa_node = XAS_RESTART;
+       return true;
+@@ -403,7 +406,7 @@ static unsigned long xas_size(const struct xa_state *xas)
+ /*
+  * Use this to calculate the maximum index that will need to be created
+  * in order to add the entry described by @xas.  Because we cannot store a
+- * multiple-index entry at index 0, the calculation is a little more complex
++ * multi-index entry at index 0, the calculation is a little more complex
+  * than you might expect.
+  */
+ static unsigned long xas_max(struct xa_state *xas)
+@@ -946,6 +949,153 @@ void xas_init_marks(const struct xa_state *xas)
+ }
+ EXPORT_SYMBOL_GPL(xas_init_marks);
+ 
++#ifdef CONFIG_XARRAY_MULTI
++static unsigned int node_get_marks(struct xa_node *node, unsigned int offset)
++{
++      unsigned int marks = 0;
++      xa_mark_t mark = XA_MARK_0;
++
++      for (;;) {
++              if (node_get_mark(node, offset, mark))
++                      marks |= 1 << (__force unsigned int)mark;
++              if (mark == XA_MARK_MAX)
++                      break;
++              mark_inc(mark);
++      }
++
++      return marks;
++}
++
++static void node_set_marks(struct xa_node *node, unsigned int offset,
++                      struct xa_node *child, unsigned int marks)
++{
++      xa_mark_t mark = XA_MARK_0;
++
++      for (;;) {
++              if (marks & (1 << (__force unsigned int)mark)) {
++                      node_set_mark(node, offset, mark);
++                      if (child)
++                              node_mark_all(child, mark);
++              }
++              if (mark == XA_MARK_MAX)
++                      break;
++              mark_inc(mark);
++      }
++}
++
++/**
++ * xas_split_alloc() - Allocate memory for splitting an entry.
++ * @xas: XArray operation state.
++ * @entry: New entry which will be stored in the array.
++ * @order: New entry order.
++ * @gfp: Memory allocation flags.
++ *
++ * This function should be called before calling xas_split().
++ * If necessary, it will allocate new nodes (and fill them with @entry)
++ * to prepare for the upcoming split of an entry of @order size into
++ * entries of the order stored in the @xas.
++ *
++ * Context: May sleep if @gfp flags permit.
++ */
++void xas_split_alloc(struct xa_state *xas, void *entry, unsigned int order,
++              gfp_t gfp)
++{
++      unsigned int sibs = (1 << (order % XA_CHUNK_SHIFT)) - 1;
++      unsigned int mask = xas->xa_sibs;
++
++      /* XXX: no support for splitting really large entries yet */
++      if (WARN_ON(xas->xa_shift + 2 * XA_CHUNK_SHIFT < order))
++              goto nomem;
++      if (xas->xa_shift + XA_CHUNK_SHIFT > order)
++              return;
++
++      do {
++              unsigned int i;
++              void *sibling;
++              struct xa_node *node;
++
++              node = kmem_cache_alloc(radix_tree_node_cachep, gfp);
++              if (!node)
++                      goto nomem;
++              node->array = xas->xa;
++              for (i = 0; i < XA_CHUNK_SIZE; i++) {
++                      if ((i & mask) == 0) {
++                              RCU_INIT_POINTER(node->slots[i], entry);
++                              sibling = xa_mk_sibling(0);
++                      } else {
++                              RCU_INIT_POINTER(node->slots[i], sibling);
++                      }
++              }
++              RCU_INIT_POINTER(node->parent, xas->xa_alloc);
++              xas->xa_alloc = node;
++      } while (sibs-- > 0);
++
++      return;
++nomem:
++      xas_destroy(xas);
++      xas_set_err(xas, -ENOMEM);
++}
++EXPORT_SYMBOL_GPL(xas_split_alloc);
++
++/**
++ * xas_split() - Split a multi-index entry into smaller entries.
++ * @xas: XArray operation state.
++ * @entry: New entry to store in the array.
++ * @order: New entry order.
++ *
++ * The value in the entry is copied to all the replacement entries.
++ *
++ * Context: Any context.  The caller should hold the xa_lock.
++ */
++void xas_split(struct xa_state *xas, void *entry, unsigned int order)
++{
++      unsigned int sibs = (1 << (order % XA_CHUNK_SHIFT)) - 1;
++      unsigned int offset, marks;
++      struct xa_node *node;
++      void *curr = xas_load(xas);
++      int values = 0;
++
++      node = xas->xa_node;
++      if (xas_top(node))
++              return;
++
++      marks = node_get_marks(node, xas->xa_offset);
++
++      offset = xas->xa_offset + sibs;
++      do {
++              if (xas->xa_shift < node->shift) {
++                      struct xa_node *child = xas->xa_alloc;
++
++                      xas->xa_alloc = rcu_dereference_raw(child->parent);
++                      child->shift = node->shift - XA_CHUNK_SHIFT;
++                      child->offset = offset;
++                      child->count = XA_CHUNK_SIZE;
++                      child->nr_values = xa_is_value(entry) ?
++                                      XA_CHUNK_SIZE : 0;
++                      RCU_INIT_POINTER(child->parent, node);
++                      node_set_marks(node, offset, child, marks);
++                      rcu_assign_pointer(node->slots[offset],
++                                      xa_mk_node(child));
++                      if (xa_is_value(curr))
++                              values--;
++              } else {
++                      unsigned int canon = offset - xas->xa_sibs;
++
++                      node_set_marks(node, canon, NULL, marks);
++                      rcu_assign_pointer(node->slots[canon], entry);
++                      while (offset > canon)
++                              rcu_assign_pointer(node->slots[offset--],
++                                              xa_mk_sibling(canon));
++                      values += (xa_is_value(entry) - xa_is_value(curr)) *
++                                      (xas->xa_sibs + 1);
++              }
++      } while (offset-- > xas->xa_offset);
++
++      node->nr_values += values;
++}
++EXPORT_SYMBOL_GPL(xas_split);
++#endif
++
+ /**
+  * xas_pause() - Pause a walk to drop a lock.
+  * @xas: XArray operation state.
+@@ -1407,7 +1557,7 @@ EXPORT_SYMBOL(__xa_store);
+  * @gfp: Memory allocation flags.
+  *
+  * After this function returns, loads from this index will return @entry.
+- * Storing into an existing multislot entry updates the entry of every index.
++ * Storing into an existing multi-index entry updates the entry of every index.
+  * The marks associated with @index are unaffected unless @entry is %NULL.
+  *
+  * Context: Any context.  Takes and releases the xa_lock.
+@@ -1549,7 +1699,7 @@ static void xas_set_range(struct xa_state *xas, unsigned long first,
+  *
+  * After this function returns, loads from any index between @first and @last,
+  * inclusive will return @entry.
+- * Storing into an existing multislot entry updates the entry of every index.
++ * Storing into an existing multi-index entry updates the entry of every index.
+  * The marks associated with @index are unaffected unless @entry is %NULL.
+  *
+  * Context: Process context.  Takes and releases the xa_lock.  May sleep
+@@ -1592,6 +1742,46 @@ unlock:
+       return xas_result(&xas, NULL);
+ }
+ EXPORT_SYMBOL(xa_store_range);
++
++/**
++ * xa_get_order() - Get the order of an entry.
++ * @xa: XArray.
++ * @index: Index of the entry.
++ *
++ * Return: A number between 0 and 63 indicating the order of the entry.
++ */
++int xa_get_order(struct xarray *xa, unsigned long index)
++{
++      XA_STATE(xas, xa, index);
++      void *entry;
++      int order = 0;
++
++      rcu_read_lock();
++      entry = xas_load(&xas);
++
++      if (!entry)
++              goto unlock;
++
++      if (!xas.xa_node)
++              goto unlock;
++
++      for (;;) {
++              unsigned int slot = xas.xa_offset + (1 << order);
++
++              if (slot >= XA_CHUNK_SIZE)
++                      break;
++              if (!xa_is_sibling(xas.xa_node->slots[slot]))
++                      break;
++              order++;
++      }
++
++      order += xas.xa_node->shift;
++unlock:
++      rcu_read_unlock();
++
++      return order;
++}
++EXPORT_SYMBOL(xa_get_order);
+ #endif /* CONFIG_XARRAY_MULTI */
+ 
+ /**
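xa_get_order() above reports the order of the entry covering an index; as the check_get_order() test earlier in this patch shows, an entry stored with xa_store_order() answers for every index in its naturally aligned range. A small standalone sketch of just that index arithmetic (no XArray code involved; the values are arbitrary examples):

  #include <stdio.h>

  int main(void)
  {
      unsigned long index = 1234;
      unsigned int order = 6;   /* entry covers 1 << 6 = 64 indices */

      unsigned long first = index & ~((1UL << order) - 1);
      unsigned long last  = first + (1UL << order) - 1;

      /* Every lookup between first and last sees the same entry, and
       * xa_get_order() reports the same order for each of them.
       */
      printf("order-%u entry at %lu covers [%lu, %lu]\n",
             order, index, first, last);
      return 0;
  }
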
+diff --git a/mm/filemap.c b/mm/filemap.c
+index db542b4948838..c10e237cc2c6e 100644
+--- a/mm/filemap.c
++++ b/mm/filemap.c
+@@ -856,7 +856,6 @@ noinline int __add_to_page_cache_locked(struct page *page,
+       int huge = PageHuge(page);
+       struct mem_cgroup *memcg;
+       int error;
+-      void *old;
+ 
+       VM_BUG_ON_PAGE(!PageLocked(page), page);
+       VM_BUG_ON_PAGE(PageSwapBacked(page), page);
+@@ -872,21 +871,41 @@ noinline int __add_to_page_cache_locked(struct page *page,
+       get_page(page);
+       page->mapping = mapping;
+       page->index = offset;
++      gfp_mask &= GFP_RECLAIM_MASK;
+ 
+       do {
++              unsigned int order = xa_get_order(xas.xa, xas.xa_index);
++              void *entry, *old = NULL;
++
++              if (order > thp_order(page))
++                      xas_split_alloc(&xas, xa_load(xas.xa, xas.xa_index),
++                                      order, gfp_mask);
+               xas_lock_irq(&xas);
+-              old = xas_load(&xas);
+-              if (old && !xa_is_value(old))
+-                      xas_set_err(&xas, -EEXIST);
++              xas_for_each_conflict(&xas, entry) {
++                      old = entry;
++                      if (!xa_is_value(entry)) {
++                              xas_set_err(&xas, -EEXIST);
++                              goto unlock;
++                      }
++              }
++
++              if (old) {
++                      if (shadowp)
++                              *shadowp = old;
++                      /* entry may have been split before we acquired lock */
++                      order = xa_get_order(xas.xa, xas.xa_index);
++                      if (order > thp_order(page)) {
++                              xas_split(&xas, old, order);
++                              xas_reset(&xas);
++                      }
++              }
++
+               xas_store(&xas, page);
+               if (xas_error(&xas))
+                       goto unlock;
+ 
+-              if (xa_is_value(old)) {
++              if (old)
+                       mapping->nrexceptional--;
+-                      if (shadowp)
+-                              *shadowp = old;
+-              }
+               mapping->nrpages++;
+ 
+               /* hugetlb pages do not participate in page cache accounting */
+@@ -894,7 +913,7 @@ noinline int __add_to_page_cache_locked(struct page *page,
+                       __inc_node_page_state(page, NR_FILE_PAGES);
+ unlock:
+               xas_unlock_irq(&xas);
+-      } while (xas_nomem(&xas, gfp_mask & GFP_RECLAIM_MASK));
++      } while (xas_nomem(&xas, gfp_mask));
+ 
+       if (xas_error(&xas))
+               goto error;
+diff --git a/mm/hugetlb.c b/mm/hugetlb.c
+index 3b08e34a775df..fe15e7d8220ab 100644
+--- a/mm/hugetlb.c
++++ b/mm/hugetlb.c
+@@ -4338,10 +4338,20 @@ int hugetlb_mcopy_atomic_pte(struct mm_struct *dst_mm,
+       struct page *page;
+ 
+       if (!*pagep) {
+-              ret = -ENOMEM;
++              /* If a page already exists, then it's UFFDIO_COPY for
++               * a non-missing case. Return -EEXIST.
++               */
++              if (vm_shared &&
++                  hugetlbfs_pagecache_present(h, dst_vma, dst_addr)) {
++                      ret = -EEXIST;
++                      goto out;
++              }
++
+               page = alloc_huge_page(dst_vma, dst_addr, 0);
+-              if (IS_ERR(page))
++              if (IS_ERR(page)) {
++                      ret = -ENOMEM;
+                       goto out;
++              }
+ 
+               ret = copy_huge_page_from_user(page,
+                                               (const void __user *) src_addr,
+diff --git a/net/bluetooth/hci_core.c b/net/bluetooth/hci_core.c
+index 83b324419ad3d..21a7ea9b70c8a 100644
+--- a/net/bluetooth/hci_core.c
++++ b/net/bluetooth/hci_core.c
+@@ -1561,8 +1561,13 @@ setup_failed:
+       } else {
+               /* Init failed, cleanup */
+               flush_work(&hdev->tx_work);
+-              flush_work(&hdev->cmd_work);
++
++              /* Since hci_rx_work() is possible to awake new cmd_work
++               * it should be flushed first to avoid unexpected call of
++               * hci_cmd_work()
++               */
+               flush_work(&hdev->rx_work);
++              flush_work(&hdev->cmd_work);
+ 
+               skb_queue_purge(&hdev->cmd_q);
+               skb_queue_purge(&hdev->rx_q);
+diff --git a/net/bluetooth/hci_sock.c b/net/bluetooth/hci_sock.c
+index 8159b344deef5..8d2c26c4b6d3d 100644
+--- a/net/bluetooth/hci_sock.c
++++ b/net/bluetooth/hci_sock.c
+@@ -755,7 +755,7 @@ void hci_sock_dev_event(struct hci_dev *hdev, int event)
+               /* Detach sockets from device */
+               read_lock(&hci_sk_list.lock);
+               sk_for_each(sk, &hci_sk_list.head) {
+-                      bh_lock_sock_nested(sk);
++                      lock_sock(sk);
+                       if (hci_pi(sk)->hdev == hdev) {
+                               hci_pi(sk)->hdev = NULL;
+                               sk->sk_err = EPIPE;
+@@ -764,7 +764,7 @@ void hci_sock_dev_event(struct hci_dev *hdev, int event)
+ 
+                               hci_dev_put(hdev);
+                       }
+-                      bh_unlock_sock(sk);
++                      release_sock(sk);
+               }
+               read_unlock(&hci_sk_list.lock);
+       }
+diff --git a/net/caif/caif_dev.c b/net/caif/caif_dev.c
+index 03c7cdd8e4cbf..8a3c19b9a9958 100644
+--- a/net/caif/caif_dev.c
++++ b/net/caif/caif_dev.c
+@@ -307,7 +307,7 @@ static void dev_flowctrl(struct net_device *dev, int on)
+       caifd_put(caifd);
+ }
+ 
+-void caif_enroll_dev(struct net_device *dev, struct caif_dev_common *caifdev,
++int caif_enroll_dev(struct net_device *dev, struct caif_dev_common *caifdev,
+                    struct cflayer *link_support, int head_room,
+                    struct cflayer **layer,
+                    int (**rcv_func)(struct sk_buff *, struct net_device *,
+@@ -318,11 +318,12 @@ void caif_enroll_dev(struct net_device *dev, struct caif_dev_common *caifdev,
+       enum cfcnfg_phy_preference pref;
+       struct cfcnfg *cfg = get_cfcnfg(dev_net(dev));
+       struct caif_device_entry_list *caifdevs;
++      int res;
+ 
+       caifdevs = caif_device_list(dev_net(dev));
+       caifd = caif_device_alloc(dev);
+       if (!caifd)
+-              return;
++              return -ENOMEM;
+       *layer = &caifd->layer;
+       spin_lock_init(&caifd->flow_lock);
+ 
+@@ -343,7 +344,7 @@ void caif_enroll_dev(struct net_device *dev, struct caif_dev_common *caifdev,
+       strlcpy(caifd->layer.name, dev->name,
+               sizeof(caifd->layer.name));
+       caifd->layer.transmit = transmit;
+-      cfcnfg_add_phy_layer(cfg,
++      res = cfcnfg_add_phy_layer(cfg,
+                               dev,
+                               &caifd->layer,
+                               pref,
+@@ -353,6 +354,7 @@ void caif_enroll_dev(struct net_device *dev, struct caif_dev_common *caifdev,
+       mutex_unlock(&caifdevs->lock);
+       if (rcv_func)
+               *rcv_func = receive;
++      return res;
+ }
+ EXPORT_SYMBOL(caif_enroll_dev);
+ 
+@@ -367,6 +369,7 @@ static int caif_device_notify(struct notifier_block *me, unsigned long what,
+       struct cflayer *layer, *link_support;
+       int head_room = 0;
+       struct caif_device_entry_list *caifdevs;
++      int res;
+ 
+       cfg = get_cfcnfg(dev_net(dev));
+       caifdevs = caif_device_list(dev_net(dev));
+@@ -392,8 +395,10 @@ static int caif_device_notify(struct notifier_block *me, unsigned long what,
+                               break;
+                       }
+               }
+-              caif_enroll_dev(dev, caifdev, link_support, head_room,
++              res = caif_enroll_dev(dev, caifdev, link_support, head_room,
+                               &layer, NULL);
++              if (res)
++                      cfserl_release(link_support);
+               caifdev->flowctrl = dev_flowctrl;
+               break;
+ 
+diff --git a/net/caif/caif_usb.c b/net/caif/caif_usb.c
+index 76bd67891fb39..46c62dd1479b8 100644
+--- a/net/caif/caif_usb.c
++++ b/net/caif/caif_usb.c
+@@ -115,6 +115,11 @@ static struct cflayer *cfusbl_create(int phyid, u8 ethaddr[ETH_ALEN],
+       return (struct cflayer *) this;
+ }
+ 
++static void cfusbl_release(struct cflayer *layer)
++{
++      kfree(layer);
++}
++
+ static struct packet_type caif_usb_type __read_mostly = {
+       .type = cpu_to_be16(ETH_P_802_EX1),
+ };
+@@ -127,6 +132,7 @@ static int cfusbl_device_notify(struct notifier_block *me, unsigned long what,
+       struct cflayer *layer, *link_support;
+       struct usbnet *usbnet;
+       struct usb_device *usbdev;
++      int res;
+ 
+       /* Check whether we have a NCM device, and find its VID/PID. */
+       if (!(dev->dev.parent && dev->dev.parent->driver &&
+@@ -169,8 +175,11 @@ static int cfusbl_device_notify(struct notifier_block *me, unsigned long what,
+       if (dev->num_tx_queues > 1)
+               pr_warn("USB device uses more than one tx queue\n");
+ 
+-      caif_enroll_dev(dev, &common, link_support, CFUSB_MAX_HEADLEN,
++      res = caif_enroll_dev(dev, &common, link_support, CFUSB_MAX_HEADLEN,
+                       &layer, &caif_usb_type.func);
++      if (res)
++              goto err;
++
+       if (!pack_added)
+               dev_add_pack(&caif_usb_type);
+       pack_added = true;
+@@ -178,6 +187,9 @@ static int cfusbl_device_notify(struct notifier_block *me, unsigned long what,
+       strlcpy(layer->name, dev->name, sizeof(layer->name));
+ 
+       return 0;
++err:
++      cfusbl_release(link_support);
++      return res;
+ }
+ 
+ static struct notifier_block caif_device_notifier = {
+diff --git a/net/caif/cfcnfg.c b/net/caif/cfcnfg.c
+index 399239a14420f..cac30e676ac94 100644
+--- a/net/caif/cfcnfg.c
++++ b/net/caif/cfcnfg.c
+@@ -450,7 +450,7 @@ unlock:
+       rcu_read_unlock();
+ }
+ 
+-void
++int
+ cfcnfg_add_phy_layer(struct cfcnfg *cnfg,
+                    struct net_device *dev, struct cflayer *phy_layer,
+                    enum cfcnfg_phy_preference pref,
+@@ -459,7 +459,7 @@ cfcnfg_add_phy_layer(struct cfcnfg *cnfg,
+ {
+       struct cflayer *frml;
+       struct cfcnfg_phyinfo *phyinfo = NULL;
+-      int i;
++      int i, res = 0;
+       u8 phyid;
+ 
+       mutex_lock(&cnfg->lock);
+@@ -473,12 +473,15 @@ cfcnfg_add_phy_layer(struct cfcnfg *cnfg,
+                       goto got_phyid;
+       }
+       pr_warn("Too many CAIF Link Layers (max 6)\n");
++      res = -EEXIST;
+       goto out;
+ 
+ got_phyid:
+       phyinfo = kzalloc(sizeof(struct cfcnfg_phyinfo), GFP_ATOMIC);
+-      if (!phyinfo)
++      if (!phyinfo) {
++              res = -ENOMEM;
+               goto out_err;
++      }
+ 
+       phy_layer->id = phyid;
+       phyinfo->pref = pref;
+@@ -492,8 +495,10 @@ got_phyid:
+ 
+       frml = cffrml_create(phyid, fcs);
+ 
+-      if (!frml)
++      if (!frml) {
++              res = -ENOMEM;
+               goto out_err;
++      }
+       phyinfo->frm_layer = frml;
+       layer_set_up(frml, cnfg->mux);
+ 
+@@ -511,11 +516,12 @@ got_phyid:
+       list_add_rcu(&phyinfo->node, &cnfg->phys);
+ out:
+       mutex_unlock(&cnfg->lock);
+-      return;
++      return res;
+ 
+ out_err:
+       kfree(phyinfo);
+       mutex_unlock(&cnfg->lock);
++      return res;
+ }
+ EXPORT_SYMBOL(cfcnfg_add_phy_layer);
+ 
+diff --git a/net/caif/cfserl.c b/net/caif/cfserl.c
+index e11725a4bb0ed..40cd57ad0a0f4 100644
+--- a/net/caif/cfserl.c
++++ b/net/caif/cfserl.c
+@@ -31,6 +31,11 @@ static int cfserl_transmit(struct cflayer *layr, struct cfpkt *pkt);
+ static void cfserl_ctrlcmd(struct cflayer *layr, enum caif_ctrlcmd ctrl,
+                          int phyid);
+ 
++void cfserl_release(struct cflayer *layer)
++{
++      kfree(layer);
++}
++
+ struct cflayer *cfserl_create(int instance, bool use_stx)
+ {
+       struct cfserl *this = kzalloc(sizeof(struct cfserl), GFP_ATOMIC);
+diff --git a/net/core/neighbour.c b/net/core/neighbour.c
+index 472a615775f32..f94d405358a21 100644
+--- a/net/core/neighbour.c
++++ b/net/core/neighbour.c
+@@ -239,6 +239,7 @@ static int neigh_forced_gc(struct neigh_table *tbl)
+ 
+                       write_lock(&n->lock);
+                       if ((n->nud_state == NUD_FAILED) ||
++                          (n->nud_state == NUD_NOARP) ||
+                           (tbl->is_multicast &&
+                            tbl->is_multicast(n->primary_key)) ||
+                           time_after(tref, n->updated))
+diff --git a/net/ieee802154/nl-mac.c b/net/ieee802154/nl-mac.c
+index d19c40c684e80..71be751123210 100644
+--- a/net/ieee802154/nl-mac.c
++++ b/net/ieee802154/nl-mac.c
+@@ -680,8 +680,10 @@ int ieee802154_llsec_getparams(struct sk_buff *skb, struct genl_info *info)
+           nla_put_u8(msg, IEEE802154_ATTR_LLSEC_SECLEVEL, params.out_level) ||
+           nla_put_u32(msg, IEEE802154_ATTR_LLSEC_FRAME_COUNTER,
+                       be32_to_cpu(params.frame_counter)) ||
+-          ieee802154_llsec_fill_key_id(msg, &params.out_key))
++          ieee802154_llsec_fill_key_id(msg, &params.out_key)) {
++              rc = -ENOBUFS;
+               goto out_free;
++      }
+ 
+       dev_put(dev);
+ 
+diff --git a/net/ieee802154/nl-phy.c b/net/ieee802154/nl-phy.c
+index 2cdc7e63fe172..88215b5c93aa4 100644
+--- a/net/ieee802154/nl-phy.c
++++ b/net/ieee802154/nl-phy.c
+@@ -241,8 +241,10 @@ int ieee802154_add_iface(struct sk_buff *skb, struct genl_info *info)
+       }
+ 
+       if (nla_put_string(msg, IEEE802154_ATTR_PHY_NAME, wpan_phy_name(phy)) ||
+-          nla_put_string(msg, IEEE802154_ATTR_DEV_NAME, dev->name))
++          nla_put_string(msg, IEEE802154_ATTR_DEV_NAME, dev->name)) {
++              rc = -EMSGSIZE;
+               goto nla_put_failure;
++      }
+       dev_put(dev);
+ 
+       wpan_phy_put(phy);
+diff --git a/net/ipv6/route.c b/net/ipv6/route.c
+index 3a9bd9687e7d1..b903fe28ce507 100644
+--- a/net/ipv6/route.c
++++ b/net/ipv6/route.c
+@@ -3688,11 +3688,11 @@ static struct fib6_info *ip6_route_info_create(struct fib6_config *cfg,
+       if (nh) {
+               if (rt->fib6_src.plen) {
+                       NL_SET_ERR_MSG(extack, "Nexthops can not be used with source routing");
+-                      goto out;
++                      goto out_free;
+               }
+               if (!nexthop_get(nh)) {
+                       NL_SET_ERR_MSG(extack, "Nexthop has been deleted");
+-                      goto out;
++                      goto out_free;
+               }
+               rt->nh = nh;
+               fib6_nh = nexthop_fib6_nh(rt->nh);
+@@ -3729,6 +3729,10 @@ static struct fib6_info *ip6_route_info_create(struct fib6_config *cfg,
+ out:
+       fib6_info_release(rt);
+       return ERR_PTR(err);
++out_free:
++      ip_fib_metrics_put(rt->fib6_metrics);
++      kfree(rt);
++      return ERR_PTR(err);
+ }
+ 
+ int ip6_route_add(struct fib6_config *cfg, gfp_t gfp_flags,
+diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c
+index 99168af0c28d9..f93fa0e210979 100644
+--- a/net/netfilter/ipvs/ip_vs_ctl.c
++++ b/net/netfilter/ipvs/ip_vs_ctl.c
+@@ -1340,7 +1340,7 @@ ip_vs_add_service(struct netns_ipvs *ipvs, struct ip_vs_service_user_kern *u,
+       ip_vs_addr_copy(svc->af, &svc->addr, &u->addr);
+       svc->port = u->port;
+       svc->fwmark = u->fwmark;
+-      svc->flags = u->flags;
++      svc->flags = u->flags & ~IP_VS_SVC_F_HASHED;
+       svc->timeout = u->timeout * HZ;
+       svc->netmask = u->netmask;
+       svc->ipvs = ipvs;
+diff --git a/net/netfilter/nf_conntrack_proto.c b/net/netfilter/nf_conntrack_proto.c
+index aaf4293ddd459..75e6b429635da 100644
+--- a/net/netfilter/nf_conntrack_proto.c
++++ b/net/netfilter/nf_conntrack_proto.c
+@@ -660,7 +660,7 @@ int nf_conntrack_proto_init(void)
+ 
+ #if IS_ENABLED(CONFIG_IPV6)
+ cleanup_sockopt:
+-      nf_unregister_sockopt(&so_getorigdst6);
++      nf_unregister_sockopt(&so_getorigdst);
+ #endif
+       return ret;
+ }
+diff --git a/net/netfilter/nfnetlink_cthelper.c b/net/netfilter/nfnetlink_cthelper.c
+index 81406b93f126d..3d5fc07b2530b 100644
+--- a/net/netfilter/nfnetlink_cthelper.c
++++ b/net/netfilter/nfnetlink_cthelper.c
+@@ -380,10 +380,14 @@ static int
+ nfnl_cthelper_update(const struct nlattr * const tb[],
+                    struct nf_conntrack_helper *helper)
+ {
++      u32 size;
+       int ret;
+ 
+-      if (tb[NFCTH_PRIV_DATA_LEN])
+-              return -EBUSY;
++      if (tb[NFCTH_PRIV_DATA_LEN]) {
++              size = ntohl(nla_get_be32(tb[NFCTH_PRIV_DATA_LEN]));
++              if (size != helper->data_len)
++                      return -EBUSY;
++      }
+ 
+       if (tb[NFCTH_POLICY]) {
+               ret = nfnl_cthelper_update_policy(helper, tb[NFCTH_POLICY]);
+diff --git a/net/netfilter/nft_ct.c b/net/netfilter/nft_ct.c
+index 2042c6f4629cc..28991730728b9 100644
+--- a/net/netfilter/nft_ct.c
++++ b/net/netfilter/nft_ct.c
+@@ -1218,7 +1218,7 @@ static void nft_ct_expect_obj_eval(struct nft_object *obj,
+       struct nf_conn *ct;
+ 
+       ct = nf_ct_get(pkt->skb, &ctinfo);
+-      if (!ct || ctinfo == IP_CT_UNTRACKED) {
++      if (!ct || nf_ct_is_confirmed(ct) || nf_ct_is_template(ct)) {
+               regs->verdict.code = NFT_BREAK;
+               return;
+       }
+diff --git a/net/nfc/llcp_sock.c b/net/nfc/llcp_sock.c
+index 0d4246af6c02b..a7e861eede2d9 100644
+--- a/net/nfc/llcp_sock.c
++++ b/net/nfc/llcp_sock.c
+@@ -110,6 +110,7 @@ static int llcp_sock_bind(struct socket *sock, struct sockaddr *addr, int alen)
+       if (!llcp_sock->service_name) {
+               nfc_llcp_local_put(llcp_sock->local);
+               llcp_sock->local = NULL;
++              llcp_sock->dev = NULL;
+               ret = -ENOMEM;
+               goto put_dev;
+       }
+@@ -119,6 +120,7 @@ static int llcp_sock_bind(struct socket *sock, struct sockaddr *addr, int alen)
+               llcp_sock->local = NULL;
+               kfree(llcp_sock->service_name);
+               llcp_sock->service_name = NULL;
++              llcp_sock->dev = NULL;
+               ret = -EADDRINUSE;
+               goto put_dev;
+       }
+diff --git a/net/sched/act_ct.c b/net/sched/act_ct.c
+index 6119c31dcd072..31eb8eefc8681 100644
+--- a/net/sched/act_ct.c
++++ b/net/sched/act_ct.c
+@@ -648,9 +648,6 @@ static int tcf_ct_fill_params(struct net *net,
+                                  sizeof(p->zone));
+       }
+ 
+-      if (p->zone == NF_CT_DEFAULT_ZONE_ID)
+-              return 0;
+-
+       nf_ct_zone_init(&zone, p->zone, NF_CT_DEFAULT_ZONE_DIR, 0);
+       tmpl = nf_ct_tmpl_alloc(net, &zone, GFP_KERNEL);
+       if (!tmpl) {
+diff --git a/net/tipc/bearer.c b/net/tipc/bearer.c
+index 0214aa1c44278..8bd2454cc89dc 100644
+--- a/net/tipc/bearer.c
++++ b/net/tipc/bearer.c
+@@ -233,7 +233,8 @@ void tipc_bearer_remove_dest(struct net *net, u32 bearer_id, u32 dest)
+  */
+ static int tipc_enable_bearer(struct net *net, const char *name,
+                             u32 disc_domain, u32 prio,
+-                            struct nlattr *attr[])
++                            struct nlattr *attr[],
++                            struct netlink_ext_ack *extack)
+ {
+       struct tipc_net *tn = tipc_net(net);
+       struct tipc_bearer_names b_names;
+@@ -244,20 +245,24 @@ static int tipc_enable_bearer(struct net *net, const char *name,
+       int bearer_id = 0;
+       int res = -EINVAL;
+       char *errstr = "";
++      u32 i;
+ 
+       if (!bearer_name_validate(name, &b_names)) {
+               errstr = "illegal name";
++              NL_SET_ERR_MSG(extack, "Illegal name");
+               goto rejected;
+       }
+ 
+       if (prio > TIPC_MAX_LINK_PRI && prio != TIPC_MEDIA_LINK_PRI) {
+               errstr = "illegal priority";
++              NL_SET_ERR_MSG(extack, "Illegal priority");
+               goto rejected;
+       }
+ 
+       m = tipc_media_find(b_names.media_name);
+       if (!m) {
+               errstr = "media not registered";
++              NL_SET_ERR_MSG(extack, "Media not registered");
+               goto rejected;
+       }
+ 
+@@ -265,33 +270,43 @@ static int tipc_enable_bearer(struct net *net, const char *name,
+               prio = m->priority;
+ 
+       /* Check new bearer vs existing ones and find free bearer id if any */
+-      while (bearer_id < MAX_BEARERS) {
+-              b = rtnl_dereference(tn->bearer_list[bearer_id]);
+-              if (!b)
+-                      break;
++      bearer_id = MAX_BEARERS;
++      i = MAX_BEARERS;
++      while (i-- != 0) {
++              b = rtnl_dereference(tn->bearer_list[i]);
++              if (!b) {
++                      bearer_id = i;
++                      continue;
++              }
+               if (!strcmp(name, b->name)) {
+                       errstr = "already enabled";
++                      NL_SET_ERR_MSG(extack, "Already enabled");
+                       goto rejected;
+               }
+-              bearer_id++;
+-              if (b->priority != prio)
+-                      continue;
+-              if (++with_this_prio <= 2)
+-                      continue;
+-              pr_warn("Bearer <%s>: already 2 bearers with priority %u\n",
+-                      name, prio);
+-              if (prio == TIPC_MIN_LINK_PRI) {
+-                      errstr = "cannot adjust to lower";
+-                      goto rejected;
++
++              if (b->priority == prio &&
++                  (++with_this_prio > 2)) {
++                      pr_warn("Bearer <%s>: already 2 bearers with priority %u\n",
++                              name, prio);
++
++                      if (prio == TIPC_MIN_LINK_PRI) {
++                              errstr = "cannot adjust to lower";
++                              NL_SET_ERR_MSG(extack, "Cannot adjust to lower");
++                              goto rejected;
++                      }
++
++                      pr_warn("Bearer <%s>: trying with adjusted priority\n",
++                              name);
++                      prio--;
++                      bearer_id = MAX_BEARERS;
++                      i = MAX_BEARERS;
++                      with_this_prio = 1;
+               }
+-              pr_warn("Bearer <%s>: trying with adjusted priority\n", name);
+-              prio--;
+-              bearer_id = 0;
+-              with_this_prio = 1;
+       }
+ 
+       if (bearer_id >= MAX_BEARERS) {
+               errstr = "max 3 bearers permitted";
++              NL_SET_ERR_MSG(extack, "Max 3 bearers permitted");
+               goto rejected;
+       }
+ 
+@@ -305,6 +320,7 @@ static int tipc_enable_bearer(struct net *net, const char *name,
+       if (res) {
+               kfree(b);
+               errstr = "failed to enable media";
++              NL_SET_ERR_MSG(extack, "Failed to enable media");
+               goto rejected;
+       }
+ 
+@@ -320,6 +336,7 @@ static int tipc_enable_bearer(struct net *net, const char *name,
+       if (res) {
+               bearer_disable(net, b);
+               errstr = "failed to create discoverer";
++              NL_SET_ERR_MSG(extack, "Failed to create discoverer");
+               goto rejected;
+       }
+ 
+@@ -862,6 +879,7 @@ int tipc_nl_bearer_get(struct sk_buff *skb, struct genl_info *info)
+       bearer = tipc_bearer_find(net, name);
+       if (!bearer) {
+               err = -EINVAL;
++              NL_SET_ERR_MSG(info->extack, "Bearer not found");
+               goto err_out;
+       }
+ 
+@@ -901,8 +919,10 @@ int __tipc_nl_bearer_disable(struct sk_buff *skb, struct genl_info *info)
+       name = nla_data(attrs[TIPC_NLA_BEARER_NAME]);
+ 
+       bearer = tipc_bearer_find(net, name);
+-      if (!bearer)
++      if (!bearer) {
++              NL_SET_ERR_MSG(info->extack, "Bearer not found");
+               return -EINVAL;
++      }
+ 
+       bearer_disable(net, bearer);
+ 
+@@ -960,7 +980,8 @@ int __tipc_nl_bearer_enable(struct sk_buff *skb, struct genl_info *info)
+                       prio = nla_get_u32(props[TIPC_NLA_PROP_PRIO]);
+       }
+ 
+-      return tipc_enable_bearer(net, bearer, domain, prio, attrs);
++      return tipc_enable_bearer(net, bearer, domain, prio, attrs,
++                                info->extack);
+ }
+ 
+ int tipc_nl_bearer_enable(struct sk_buff *skb, struct genl_info *info)
+@@ -999,6 +1020,7 @@ int tipc_nl_bearer_add(struct sk_buff *skb, struct genl_info *info)
+       b = tipc_bearer_find(net, name);
+       if (!b) {
+               rtnl_unlock();
++              NL_SET_ERR_MSG(info->extack, "Bearer not found");
+               return -EINVAL;
+       }
+ 
+@@ -1039,8 +1061,10 @@ int __tipc_nl_bearer_set(struct sk_buff *skb, struct genl_info *info)
+       name = nla_data(attrs[TIPC_NLA_BEARER_NAME]);
+ 
+       b = tipc_bearer_find(net, name);
+-      if (!b)
++      if (!b) {
++              NL_SET_ERR_MSG(info->extack, "Bearer not found");
+               return -EINVAL;
++      }
+ 
+       if (attrs[TIPC_NLA_BEARER_PROP]) {
+               struct nlattr *props[TIPC_NLA_PROP_MAX + 1];
+@@ -1059,12 +1083,18 @@ int __tipc_nl_bearer_set(struct sk_buff *skb, struct genl_info *info)
+               if (props[TIPC_NLA_PROP_WIN])
+                       b->window = nla_get_u32(props[TIPC_NLA_PROP_WIN]);
+               if (props[TIPC_NLA_PROP_MTU]) {
+-                      if (b->media->type_id != TIPC_MEDIA_TYPE_UDP)
++                      if (b->media->type_id != TIPC_MEDIA_TYPE_UDP) {
++                              NL_SET_ERR_MSG(info->extack,
++                                             "MTU property is unsupported");
+                               return -EINVAL;
++                      }
+ #ifdef CONFIG_TIPC_MEDIA_UDP
+                       if (tipc_udp_mtu_bad(nla_get_u32
+-                                           (props[TIPC_NLA_PROP_MTU])))
++                                           (props[TIPC_NLA_PROP_MTU]))) {
++                              NL_SET_ERR_MSG(info->extack,
++                                             "MTU value is out-of-range");
+                               return -EINVAL;
++                      }
+                       b->mtu = nla_get_u32(props[TIPC_NLA_PROP_MTU]);
+                       tipc_node_apply_property(net, b, TIPC_NLA_PROP_MTU);
+ #endif
+@@ -1192,6 +1222,7 @@ int tipc_nl_media_get(struct sk_buff *skb, struct genl_info *info)
+       rtnl_lock();
+       media = tipc_media_find(name);
+       if (!media) {
++              NL_SET_ERR_MSG(info->extack, "Media not found");
+               err = -EINVAL;
+               goto err_out;
+       }
+@@ -1228,9 +1259,10 @@ int __tipc_nl_media_set(struct sk_buff *skb, struct genl_info *info)
+       name = nla_data(attrs[TIPC_NLA_MEDIA_NAME]);
+ 
+       m = tipc_media_find(name);
+-      if (!m)
++      if (!m) {
++              NL_SET_ERR_MSG(info->extack, "Media not found");
+               return -EINVAL;
+-
++      }
+       if (attrs[TIPC_NLA_MEDIA_PROP]) {
+               struct nlattr *props[TIPC_NLA_PROP_MAX + 1];
+ 
+@@ -1246,12 +1278,18 @@ int __tipc_nl_media_set(struct sk_buff *skb, struct genl_info *info)
+               if (props[TIPC_NLA_PROP_WIN])
+                       m->window = nla_get_u32(props[TIPC_NLA_PROP_WIN]);
+               if (props[TIPC_NLA_PROP_MTU]) {
+-                      if (m->type_id != TIPC_MEDIA_TYPE_UDP)
++                      if (m->type_id != TIPC_MEDIA_TYPE_UDP) {
++                              NL_SET_ERR_MSG(info->extack,
++                                             "MTU property is unsupported");
+                               return -EINVAL;
++                      }
+ #ifdef CONFIG_TIPC_MEDIA_UDP
+                       if (tipc_udp_mtu_bad(nla_get_u32
+-                                           (props[TIPC_NLA_PROP_MTU])))
++                                           (props[TIPC_NLA_PROP_MTU]))) {
++                              NL_SET_ERR_MSG(info->extack,
++                                             "MTU value is out-of-range");
+                               return -EINVAL;
++                      }
+                       m->mtu = nla_get_u32(props[TIPC_NLA_PROP_MTU]);
+ #endif
+               }
+diff --git a/net/wireless/core.h b/net/wireless/core.h
+index d83c8e009448a..17621d22fb175 100644
+--- a/net/wireless/core.h
++++ b/net/wireless/core.h
+@@ -433,6 +433,8 @@ void cfg80211_sme_abandon_assoc(struct wireless_dev *wdev);
+ 
+ /* internal helpers */
+ bool cfg80211_supported_cipher_suite(struct wiphy *wiphy, u32 cipher);
++bool cfg80211_valid_key_idx(struct cfg80211_registered_device *rdev,
++                          int key_idx, bool pairwise);
+ int cfg80211_validate_key_settings(struct cfg80211_registered_device *rdev,
+                                  struct key_params *params, int key_idx,
+                                  bool pairwise, const u8 *mac_addr);
+diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
+index 5bb2316befb98..7b170ed6923e7 100644
+--- a/net/wireless/nl80211.c
++++ b/net/wireless/nl80211.c
+@@ -3979,9 +3979,6 @@ static int nl80211_del_key(struct sk_buff *skb, struct genl_info *info)
+       if (err)
+               return err;
+ 
+-      if (key.idx < 0)
+-              return -EINVAL;
+-
+       if (info->attrs[NL80211_ATTR_MAC])
+               mac_addr = nla_data(info->attrs[NL80211_ATTR_MAC]);
+ 
+@@ -3997,6 +3994,10 @@ static int nl80211_del_key(struct sk_buff *skb, struct genl_info *info)
+           key.type != NL80211_KEYTYPE_GROUP)
+               return -EINVAL;
+ 
++      if (!cfg80211_valid_key_idx(rdev, key.idx,
++                                  key.type == NL80211_KEYTYPE_PAIRWISE))
++              return -EINVAL;
++
+       if (!rdev->ops->del_key)
+               return -EOPNOTSUPP;
+ 
+diff --git a/net/wireless/util.c b/net/wireless/util.c
+index 82244e2fc1f54..4eae6ad328514 100644
+--- a/net/wireless/util.c
++++ b/net/wireless/util.c
+@@ -229,11 +229,48 @@ bool cfg80211_supported_cipher_suite(struct wiphy *wiphy, u32 cipher)
+       return false;
+ }
+ 
++static bool
++cfg80211_igtk_cipher_supported(struct cfg80211_registered_device *rdev)
++{
++      struct wiphy *wiphy = &rdev->wiphy;
++      int i;
++
++      for (i = 0; i < wiphy->n_cipher_suites; i++) {
++              switch (wiphy->cipher_suites[i]) {
++              case WLAN_CIPHER_SUITE_AES_CMAC:
++              case WLAN_CIPHER_SUITE_BIP_CMAC_256:
++              case WLAN_CIPHER_SUITE_BIP_GMAC_128:
++              case WLAN_CIPHER_SUITE_BIP_GMAC_256:
++                      return true;
++              }
++      }
++
++      return false;
++}
++
++bool cfg80211_valid_key_idx(struct cfg80211_registered_device *rdev,
++                          int key_idx, bool pairwise)
++{
++      int max_key_idx;
++
++      if (pairwise)
++              max_key_idx = 3;
++      else if (cfg80211_igtk_cipher_supported(rdev))
++              max_key_idx = 5;
++      else
++              max_key_idx = 3;
++
++      if (key_idx < 0 || key_idx > max_key_idx)
++              return false;
++
++      return true;
++}
++
+ int cfg80211_validate_key_settings(struct cfg80211_registered_device *rdev,
+                                  struct key_params *params, int key_idx,
+                                  bool pairwise, const u8 *mac_addr)
+ {
+-      if (key_idx < 0 || key_idx > 5)
++      if (!cfg80211_valid_key_idx(rdev, key_idx, pairwise))
+               return -EINVAL;
+ 
+       if (!pairwise && mac_addr && !(rdev->wiphy.flags & WIPHY_FLAG_IBSS_RSN))
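The cfg80211_valid_key_idx() helper added above permits group key indices 4 and 5 only when the wiphy advertises a BIP (IGTK) cipher, and caps pairwise keys at index 3 in every case; nl80211_del_key() now calls it instead of only rejecting key.idx < 0. The fragment below restates just that bounds rule in isolation; the igtk_supported flag is a stand-in for the cipher-suite walk and is not kernel API.

/*
 * Stand-alone restatement of the bounds check introduced above.  The
 * igtk_supported flag replaces cfg80211_igtk_cipher_supported() so the
 * rule can be read, or unit-tested, outside the kernel.
 */
#include <stdbool.h>

static bool valid_key_idx(int key_idx, bool pairwise, bool igtk_supported)
{
	/*
	 * Pairwise keys use WEP-style indices 0..3; group keys may also
	 * use 4..5, but only when management-frame protection (an IGTK
	 * cipher) is available.
	 */
	int max_key_idx = (!pairwise && igtk_supported) ? 5 : 3;

	return key_idx >= 0 && key_idx <= max_key_idx;
}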
+diff --git a/samples/vfio-mdev/mdpy-fb.c b/samples/vfio-mdev/mdpy-fb.c
+index 2719bb2596530..a760e130bd0d6 100644
+--- a/samples/vfio-mdev/mdpy-fb.c
++++ b/samples/vfio-mdev/mdpy-fb.c
+@@ -117,22 +117,27 @@ static int mdpy_fb_probe(struct pci_dev *pdev,
+       if (format != DRM_FORMAT_XRGB8888) {
+               pci_err(pdev, "format mismatch (0x%x != 0x%x)\n",
+                       format, DRM_FORMAT_XRGB8888);
+-              return -EINVAL;
++              ret = -EINVAL;
++              goto err_release_regions;
+       }
+       if (width < 100  || width > 10000) {
+               pci_err(pdev, "width (%d) out of range\n", width);
+-              return -EINVAL;
++              ret = -EINVAL;
++              goto err_release_regions;
+       }
+       if (height < 100 || height > 10000) {
+               pci_err(pdev, "height (%d) out of range\n", height);
+-              return -EINVAL;
++              ret = -EINVAL;
++              goto err_release_regions;
+       }
+       pci_info(pdev, "mdpy found: %dx%d framebuffer\n",
+                width, height);
+ 
+       info = framebuffer_alloc(sizeof(struct mdpy_fb_par), &pdev->dev);
+-      if (!info)
++      if (!info) {
++              ret = -ENOMEM;
+               goto err_release_regions;
++      }
+       pci_set_drvdata(pdev, info);
+       par = info->par;
+ 
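The mdpy-fb hunk turns the early `return -EINVAL` paths of the probe function into jumps to err_release_regions, so the PCI regions claimed earlier are released on every failure, and gives the failed framebuffer_alloc() a proper -ENOMEM. A generic sketch of that unwind pattern follows; example_probe() and its sanity check are illustrative and not taken from the sample driver.

/*
 * Generic sketch of the goto-unwind pattern applied above: every
 * failure after pci_request_regions() must release what was already
 * claimed before returning.  Names and the check are illustrative.
 */
#include <linux/pci.h>

static int example_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	int ret;

	ret = pci_enable_device(pdev);
	if (ret < 0)
		return ret;

	ret = pci_request_regions(pdev, "example");
	if (ret < 0)
		goto err_disable;

	if (pci_resource_len(pdev, 0) < PAGE_SIZE) {	/* stand-in check */
		ret = -EINVAL;
		goto err_release_regions;	/* do not leak the regions */
	}

	return 0;

err_release_regions:
	pci_release_regions(pdev);
err_disable:
	pci_disable_device(pdev);
	return ret;
}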
+diff --git a/sound/core/timer.c b/sound/core/timer.c
+index 013f0e69ff0f7..b5a0ba79bf746 100644
+--- a/sound/core/timer.c
++++ b/sound/core/timer.c
+@@ -491,9 +491,10 @@ static void snd_timer_notify1(struct snd_timer_instance *ti, int event)
+               return;
+       if (timer->hw.flags & SNDRV_TIMER_HW_SLAVE)
+               return;
++      event += 10; /* convert to SNDRV_TIMER_EVENT_MXXX */
+       list_for_each_entry(ts, &ti->slave_active_head, active_list)
+               if (ts->ccallback)
+-                      ts->ccallback(ts, event + 100, &tstamp, resolution);
++                      ts->ccallback(ts, event, &tstamp, resolution);
+ }
+ 
+ /* start/continue a master timer */
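The one-line timer fix relies on the ALSA uapi layout in which each master-timer event code equals its plain counterpart plus 10 (for example SNDRV_TIMER_EVENT_MSTART = SNDRV_TIMER_EVENT_START + 10), so the previous `event + 100` produced values outside the enum. A compile-time restatement of that assumption, for illustration only, is:

/*
 * Compile-time restatement of the offset relied on above; the helper
 * is illustrative and never called.
 */
#include <linux/build_bug.h>
#include <sound/asound.h>

static inline void check_master_event_offset(void)
{
	BUILD_BUG_ON(SNDRV_TIMER_EVENT_MSTART != SNDRV_TIMER_EVENT_START + 10);
	BUILD_BUG_ON(SNDRV_TIMER_EVENT_MCONTINUE != SNDRV_TIMER_EVENT_CONTINUE + 10);
}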
+diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
+index b9fa2ee0a40cb..de40bb99b6793 100644
+--- a/sound/pci/hda/patch_realtek.c
++++ b/sound/pci/hda/patch_realtek.c
+@@ -8062,6 +8062,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+       SND_PCI_QUIRK(0x103c, 0x82bf, "HP G3 mini", ALC221_FIXUP_HP_MIC_NO_PRESENCE),
+       SND_PCI_QUIRK(0x103c, 0x82c0, "HP G3 mini premium", ALC221_FIXUP_HP_MIC_NO_PRESENCE),
+       SND_PCI_QUIRK(0x103c, 0x83b9, "HP Spectre x360", ALC269_FIXUP_HP_MUTE_LED_MIC3),
++      SND_PCI_QUIRK(0x103c, 0x841c, "HP Pavilion 15-CK0xx", ALC269_FIXUP_HP_MUTE_LED_MIC3),
+       SND_PCI_QUIRK(0x103c, 0x8497, "HP Envy x360", ALC269_FIXUP_HP_MUTE_LED_MIC3),
+       SND_PCI_QUIRK(0x103c, 0x84da, "HP OMEN dc0019-ur", ALC295_FIXUP_HP_OMEN),
+       SND_PCI_QUIRK(0x103c, 0x84e7, "HP Pavilion 15", ALC269_FIXUP_HP_MUTE_LED_MIC3),
+diff --git a/sound/usb/mixer_quirks.c b/sound/usb/mixer_quirks.c
+index 5251818e10d33..d926869c031b1 100644
+--- a/sound/usb/mixer_quirks.c
++++ b/sound/usb/mixer_quirks.c
+@@ -1697,7 +1697,7 @@ static struct snd_kcontrol_new snd_microii_mixer_spdif[] = {
+ static int snd_microii_controls_create(struct usb_mixer_interface *mixer)
+ {
+       int err, i;
+-      const static usb_mixer_elem_resume_func_t resume_funcs[] = {
++      static const usb_mixer_elem_resume_func_t resume_funcs[] = {
+               snd_microii_spdif_default_update,
+               NULL,
+               snd_microii_spdif_switch_update
