commit:     688ece981c67234af4e9b2ee34676eb3fec43278
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Tue Oct 22 16:56:26 2024 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Tue Oct 22 16:56:26 2024 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=688ece98

Linux patch 6.11.5

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

 0000_README             |    4 +
 1004_linux-6.11.5.patch | 3901 +++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 3905 insertions(+)

diff --git a/0000_README b/0000_README
index df7729f5..70f8b56f 100644
--- a/0000_README
+++ b/0000_README
@@ -59,6 +59,10 @@ Patch:  1003_linux-6.11.4.patch
 From:   https://www.kernel.org
 Desc:   Linux 6.11.4
 
+Patch:  1004_linux-6.11.5.patch
+From:   https://www.kernel.org
+Desc:   Linux 6.11.5
+
 Patch:  1510_fs-enable-link-security-restrictions-by-default.patch
From:   http://sources.debian.net/src/linux/3.16.7-ckt4-3/debian/patches/debian/fs-enable-link-security-restrictions-by-default.patch/
 Desc:   Enable link security restrictions by default.

diff --git a/1004_linux-6.11.5.patch b/1004_linux-6.11.5.patch
new file mode 100644
index 00000000..d7a8488f
--- /dev/null
+++ b/1004_linux-6.11.5.patch
@@ -0,0 +1,3901 @@
+diff --git a/Makefile b/Makefile
+index 50c615983e4405..687ce7aee67a73 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 6
+ PATCHLEVEL = 11
+-SUBLEVEL = 4
++SUBLEVEL = 5
+ EXTRAVERSION =
+ NAME = Baby Opossum Posse
+ 
+diff --git a/arch/arm64/boot/dts/marvell/cn9130-sr-som.dtsi b/arch/arm64/boot/dts/marvell/cn9130-sr-som.dtsi
+index 4676e3488f54d5..cb8d54895a7775 100644
+--- a/arch/arm64/boot/dts/marvell/cn9130-sr-som.dtsi
++++ b/arch/arm64/boot/dts/marvell/cn9130-sr-som.dtsi
+@@ -136,7 +136,7 @@ cp0_i2c0_pins: cp0-i2c0-pins {
+               };
+ 
+               cp0_mdio_pins: cp0-mdio-pins {
+-                      marvell,pins = "mpp40", "mpp41";
++                      marvell,pins = "mpp0", "mpp1";
+                       marvell,function = "ge";
+               };
+ 
+diff --git a/arch/arm64/include/asm/uprobes.h b/arch/arm64/include/asm/uprobes.h
+index 2b09495499c618..014b02897f8e22 100644
+--- a/arch/arm64/include/asm/uprobes.h
++++ b/arch/arm64/include/asm/uprobes.h
+@@ -10,11 +10,9 @@
+ #include <asm/insn.h>
+ #include <asm/probes.h>
+ 
+-#define MAX_UINSN_BYTES               AARCH64_INSN_SIZE
+-
+ #define UPROBE_SWBP_INSN      cpu_to_le32(BRK64_OPCODE_UPROBES)
+ #define UPROBE_SWBP_INSN_SIZE AARCH64_INSN_SIZE
+-#define UPROBE_XOL_SLOT_BYTES MAX_UINSN_BYTES
++#define UPROBE_XOL_SLOT_BYTES AARCH64_INSN_SIZE
+ 
+ typedef __le32 uprobe_opcode_t;
+ 
+@@ -23,8 +21,8 @@ struct arch_uprobe_task {
+ 
+ struct arch_uprobe {
+       union {
+-              u8 insn[MAX_UINSN_BYTES];
+-              u8 ixol[MAX_UINSN_BYTES];
++              __le32 insn;
++              __le32 ixol;
+       };
+       struct arch_probe_insn api;
+       bool simulate;
+diff --git a/arch/arm64/kernel/probes/decode-insn.c b/arch/arm64/kernel/probes/decode-insn.c
+index 968d5fffe23302..3496d6169e59b2 100644
+--- a/arch/arm64/kernel/probes/decode-insn.c
++++ b/arch/arm64/kernel/probes/decode-insn.c
+@@ -99,10 +99,6 @@ arm_probe_decode_insn(probe_opcode_t insn, struct arch_probe_insn *api)
+           aarch64_insn_is_blr(insn) ||
+           aarch64_insn_is_ret(insn)) {
+               api->handler = simulate_br_blr_ret;
+-      } else if (aarch64_insn_is_ldr_lit(insn)) {
+-              api->handler = simulate_ldr_literal;
+-      } else if (aarch64_insn_is_ldrsw_lit(insn)) {
+-              api->handler = simulate_ldrsw_literal;
+       } else {
+               /*
+                * Instruction cannot be stepped out-of-line and we don't
+@@ -140,6 +136,17 @@ arm_kprobe_decode_insn(kprobe_opcode_t *addr, struct arch_specific_insn *asi)
+       probe_opcode_t insn = le32_to_cpu(*addr);
+       probe_opcode_t *scan_end = NULL;
+       unsigned long size = 0, offset = 0;
++      struct arch_probe_insn *api = &asi->api;
++
++      if (aarch64_insn_is_ldr_lit(insn)) {
++              api->handler = simulate_ldr_literal;
++              decoded = INSN_GOOD_NO_SLOT;
++      } else if (aarch64_insn_is_ldrsw_lit(insn)) {
++              api->handler = simulate_ldrsw_literal;
++              decoded = INSN_GOOD_NO_SLOT;
++      } else {
++              decoded = arm_probe_decode_insn(insn, &asi->api);
++      }
+ 
+       /*
+        * If there's a symbol defined in front of and near enough to
+@@ -157,7 +164,6 @@ arm_kprobe_decode_insn(kprobe_opcode_t *addr, struct arch_specific_insn *asi)
+               else
+                       scan_end = addr - MAX_ATOMIC_CONTEXT_SIZE;
+       }
+-      decoded = arm_probe_decode_insn(insn, &asi->api);
+ 
+       if (decoded != INSN_REJECTED && scan_end)
+               if (is_probed_address_atomic(addr - 1, scan_end))
+diff --git a/arch/arm64/kernel/probes/simulate-insn.c b/arch/arm64/kernel/probes/simulate-insn.c
+index 22d0b32524763e..b65334ab79d2b0 100644
+--- a/arch/arm64/kernel/probes/simulate-insn.c
++++ b/arch/arm64/kernel/probes/simulate-insn.c
+@@ -171,17 +171,15 @@ simulate_tbz_tbnz(u32 opcode, long addr, struct pt_regs *regs)
+ void __kprobes
+ simulate_ldr_literal(u32 opcode, long addr, struct pt_regs *regs)
+ {
+-      u64 *load_addr;
++      unsigned long load_addr;
+       int xn = opcode & 0x1f;
+-      int disp;
+ 
+-      disp = ldr_displacement(opcode);
+-      load_addr = (u64 *) (addr + disp);
++      load_addr = addr + ldr_displacement(opcode);
+ 
+       if (opcode & (1 << 30)) /* x0-x30 */
+-              set_x_reg(regs, xn, *load_addr);
++              set_x_reg(regs, xn, READ_ONCE(*(u64 *)load_addr));
+       else                    /* w0-w30 */
+-              set_w_reg(regs, xn, *load_addr);
++              set_w_reg(regs, xn, READ_ONCE(*(u32 *)load_addr));
+ 
+       instruction_pointer_set(regs, instruction_pointer(regs) + 4);
+ }
+@@ -189,14 +187,12 @@ simulate_ldr_literal(u32 opcode, long addr, struct pt_regs *regs)
+ void __kprobes
+ simulate_ldrsw_literal(u32 opcode, long addr, struct pt_regs *regs)
+ {
+-      s32 *load_addr;
++      unsigned long load_addr;
+       int xn = opcode & 0x1f;
+-      int disp;
+ 
+-      disp = ldr_displacement(opcode);
+-      load_addr = (s32 *) (addr + disp);
++      load_addr = addr + ldr_displacement(opcode);
+ 
+-      set_x_reg(regs, xn, *load_addr);
++      set_x_reg(regs, xn, READ_ONCE(*(s32 *)load_addr));
+ 
+       instruction_pointer_set(regs, instruction_pointer(regs) + 4);
+ }
+diff --git a/arch/arm64/kernel/probes/uprobes.c b/arch/arm64/kernel/probes/uprobes.c
+index d49aef2657cdf7..a2f137a595fc1c 100644
+--- a/arch/arm64/kernel/probes/uprobes.c
++++ b/arch/arm64/kernel/probes/uprobes.c
+@@ -42,7 +42,7 @@ int arch_uprobe_analyze_insn(struct arch_uprobe *auprobe, struct mm_struct *mm,
+       else if (!IS_ALIGNED(addr, AARCH64_INSN_SIZE))
+               return -EINVAL;
+ 
+-      insn = *(probe_opcode_t *)(&auprobe->insn[0]);
++      insn = le32_to_cpu(auprobe->insn);
+ 
+       switch (arm_probe_decode_insn(insn, &auprobe->api)) {
+       case INSN_REJECTED:
+@@ -108,7 +108,7 @@ bool arch_uprobe_skip_sstep(struct arch_uprobe *auprobe, struct pt_regs *regs)
+       if (!auprobe->simulate)
+               return false;
+ 
+-      insn = *(probe_opcode_t *)(&auprobe->insn[0]);
++      insn = le32_to_cpu(auprobe->insn);
+       addr = instruction_pointer(regs);
+ 
+       if (auprobe->api.handler)
+diff --git a/arch/s390/kvm/diag.c b/arch/s390/kvm/diag.c
+index 2a32438e09ceba..74f73141f9b96b 100644
+--- a/arch/s390/kvm/diag.c
++++ b/arch/s390/kvm/diag.c
+@@ -77,7 +77,7 @@ static int __diag_page_ref_service(struct kvm_vcpu *vcpu)
+       vcpu->stat.instruction_diagnose_258++;
+       if (vcpu->run->s.regs.gprs[rx] & 7)
+               return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
+-      rc = read_guest(vcpu, vcpu->run->s.regs.gprs[rx], rx, &parm, sizeof(parm));
++      rc = read_guest_real(vcpu, vcpu->run->s.regs.gprs[rx], &parm, sizeof(parm));
+       if (rc)
+               return kvm_s390_inject_prog_cond(vcpu, rc);
+       if (parm.parm_version != 2 || parm.parm_len < 5 || parm.code != 0x258)
+diff --git a/arch/s390/kvm/gaccess.c b/arch/s390/kvm/gaccess.c
+index e65f597e3044a7..a688351f4ab521 100644
+--- a/arch/s390/kvm/gaccess.c
++++ b/arch/s390/kvm/gaccess.c
+@@ -828,6 +828,8 @@ static int access_guest_page(struct kvm *kvm, enum gacc_mode mode, gpa_t gpa,
+       const gfn_t gfn = gpa_to_gfn(gpa);
+       int rc;
+ 
++      if (!gfn_to_memslot(kvm, gfn))
++              return PGM_ADDRESSING;
+       if (mode == GACC_STORE)
+               rc = kvm_write_guest_page(kvm, gfn, data, offset, len);
+       else
+@@ -985,6 +987,8 @@ int access_guest_real(struct kvm_vcpu *vcpu, unsigned long gra,
+               gra += fragment_len;
+               data += fragment_len;
+       }
++      if (rc > 0)
++              vcpu->arch.pgm.code = rc;
+       return rc;
+ }
+ 
+diff --git a/arch/s390/kvm/gaccess.h b/arch/s390/kvm/gaccess.h
+index b320d12aa04934..3fde45a151f22e 100644
+--- a/arch/s390/kvm/gaccess.h
++++ b/arch/s390/kvm/gaccess.h
+@@ -405,11 +405,12 @@ int read_guest_abs(struct kvm_vcpu *vcpu, unsigned long gpa, void *data,
+  * @len: number of bytes to copy
+  *
+  * Copy @len bytes from @data (kernel space) to @gra (guest real address).
+- * It is up to the caller to ensure that the entire guest memory range is
+- * valid memory before calling this function.
+  * Guest low address and key protection are not checked.
+  *
+- * Returns zero on success or -EFAULT on error.
++ * Returns zero on success, -EFAULT when copying from @data failed, or
++ * PGM_ADDRESSING in case @gra is outside a memslot. In this case, pgm check info
++ * is also stored to allow injecting into the guest (if applicable) using
++ * kvm_s390_inject_prog_cond().
+  *
+  * If an error occurs data may have been copied partially to guest memory.
+  */
+@@ -428,11 +429,12 @@ int write_guest_real(struct kvm_vcpu *vcpu, unsigned long gra, void *data,
+  * @len: number of bytes to copy
+  *
+  * Copy @len bytes from @gra (guest real address) to @data (kernel space).
+- * It is up to the caller to ensure that the entire guest memory range is
+- * valid memory before calling this function.
+  * Guest key protection is not checked.
+  *
+- * Returns zero on success or -EFAULT on error.
++ * Returns zero on success, -EFAULT when copying to @data failed, or
++ * PGM_ADDRESSING in case @gra is outside a memslot. In this case, pgm check info
++ * is also stored to allow injecting into the guest (if applicable) using
++ * kvm_s390_inject_prog_cond().
+  *
+  * If an error occurs data may have been copied partially to kernel space.
+  */
+diff --git a/arch/x86/entry/entry.S b/arch/x86/entry/entry.S
+index d9feadffa972da..324686bca36813 100644
+--- a/arch/x86/entry/entry.S
++++ b/arch/x86/entry/entry.S
+@@ -9,6 +9,8 @@
+ #include <asm/unwind_hints.h>
+ #include <asm/segment.h>
+ #include <asm/cache.h>
++#include <asm/cpufeatures.h>
++#include <asm/nospec-branch.h>
+ 
+ #include "calling.h"
+ 
+@@ -19,6 +21,9 @@ SYM_FUNC_START(entry_ibpb)
+       movl    $PRED_CMD_IBPB, %eax
+       xorl    %edx, %edx
+       wrmsr
++
++      /* Make sure IBPB clears return stack predictions too. */
++      FILL_RETURN_BUFFER %rax, RSB_CLEAR_LOOPS, X86_BUG_IBPB_NO_RET
+       RET
+ SYM_FUNC_END(entry_ibpb)
+ /* For KVM */
+diff --git a/arch/x86/entry/entry_32.S b/arch/x86/entry/entry_32.S
+index d3a814efbff663..20be5758c2d2e2 100644
+--- a/arch/x86/entry/entry_32.S
++++ b/arch/x86/entry/entry_32.S
+@@ -871,6 +871,8 @@ SYM_FUNC_START(entry_SYSENTER_32)
+ 
+       /* Now ready to switch the cr3 */
+       SWITCH_TO_USER_CR3 scratch_reg=%eax
++      /* Clobbers ZF */
++      CLEAR_CPU_BUFFERS
+ 
+       /*
+        * Restore all flags except IF. (We restore IF separately because
+@@ -881,7 +883,6 @@ SYM_FUNC_START(entry_SYSENTER_32)
+       BUG_IF_WRONG_CR3 no_user_check=1
+       popfl
+       popl    %eax
+-      CLEAR_CPU_BUFFERS
+ 
+       /*
+        * Return back to the vDSO, which will pop ecx and edx.
+@@ -1144,7 +1145,6 @@ SYM_CODE_START(asm_exc_nmi)
+ 
+       /* Not on SYSENTER stack. */
+       call    exc_nmi
+-      CLEAR_CPU_BUFFERS
+       jmp     .Lnmi_return
+ 
+ .Lnmi_from_sysenter_stack:
+@@ -1165,6 +1165,7 @@ SYM_CODE_START(asm_exc_nmi)
+ 
+       CHECK_AND_APPLY_ESPFIX
+       RESTORE_ALL_NMI cr3_reg=%edi pop=4
++      CLEAR_CPU_BUFFERS
+       jmp     .Lirq_return
+ 
+ #ifdef CONFIG_X86_ESPFIX32
+@@ -1206,6 +1207,7 @@ SYM_CODE_START(asm_exc_nmi)
+        *  1 - orig_ax
+        */
+       lss     (1+5+6)*4(%esp), %esp                   # back to espfix stack
++      CLEAR_CPU_BUFFERS
+       jmp     .Lirq_return
+ #endif
+ SYM_CODE_END(asm_exc_nmi)
+diff --git a/arch/x86/include/asm/cpufeatures.h b/arch/x86/include/asm/cpufeatures.h
+index dd4682857c1208..913fd3a7bac650 100644
+--- a/arch/x86/include/asm/cpufeatures.h
++++ b/arch/x86/include/asm/cpufeatures.h
+@@ -215,7 +215,7 @@
+ #define X86_FEATURE_SPEC_STORE_BYPASS_DISABLE ( 7*32+23) /* Disable Speculative Store Bypass. */
+ #define X86_FEATURE_LS_CFG_SSBD               ( 7*32+24)  /* AMD SSBD implementation via LS_CFG MSR */
+ #define X86_FEATURE_IBRS              ( 7*32+25) /* "ibrs" Indirect Branch Restricted Speculation */
+-#define X86_FEATURE_IBPB              ( 7*32+26) /* "ibpb" Indirect Branch Prediction Barrier */
++#define X86_FEATURE_IBPB              ( 7*32+26) /* "ibpb" Indirect Branch Prediction Barrier without a guaranteed RSB flush */
+ #define X86_FEATURE_STIBP             ( 7*32+27) /* "stibp" Single Thread Indirect Branch Predictors */
+ #define X86_FEATURE_ZEN                       ( 7*32+28) /* Generic flag for all Zen and newer */
+ #define X86_FEATURE_L1TF_PTEINV               ( 7*32+29) /* L1TF workaround PTE inversion */
+@@ -348,6 +348,7 @@
+ #define X86_FEATURE_CPPC              (13*32+27) /* "cppc" Collaborative Processor Performance Control */
+ #define X86_FEATURE_AMD_PSFD            (13*32+28) /* Predictive Store Forwarding Disable */
+ #define X86_FEATURE_BTC_NO            (13*32+29) /* Not vulnerable to Branch Type Confusion */
++#define X86_FEATURE_AMD_IBPB_RET      (13*32+30) /* IBPB clears return address predictor */
+ #define X86_FEATURE_BRS                       (13*32+31) /* "brs" Branch Sampling available */
+ 
+ /* Thermal and Power Management Leaf, CPUID level 0x00000006 (EAX), word 14 */
+@@ -523,4 +524,5 @@
+ #define X86_BUG_DIV0                  X86_BUG(1*32 + 1) /* "div0" AMD DIV0 speculation bug */
+ #define X86_BUG_RFDS                  X86_BUG(1*32 + 2) /* "rfds" CPU is vulnerable to Register File Data Sampling */
+ #define X86_BUG_BHI                   X86_BUG(1*32 + 3) /* "bhi" CPU is affected by Branch History Injection */
++#define X86_BUG_IBPB_NO_RET           X86_BUG(1*32 + 4) /* "ibpb_no_ret" IBPB omits return target predictions */
+ #endif /* _ASM_X86_CPUFEATURES_H */
+diff --git a/arch/x86/include/asm/nospec-branch.h b/arch/x86/include/asm/nospec-branch.h
+index ff5f1ecc7d1e65..96b410b1d4e841 100644
+--- a/arch/x86/include/asm/nospec-branch.h
++++ b/arch/x86/include/asm/nospec-branch.h
+@@ -323,7 +323,16 @@
+  * Note: Only the memory operand variant of VERW clears the CPU buffers.
+  */
+ .macro CLEAR_CPU_BUFFERS
+-      ALTERNATIVE "", __stringify(verw _ASM_RIP(mds_verw_sel)), X86_FEATURE_CLEAR_CPU_BUF
++#ifdef CONFIG_X86_64
++      ALTERNATIVE "", "verw mds_verw_sel(%rip)", X86_FEATURE_CLEAR_CPU_BUF
++#else
++      /*
++       * In 32bit mode, the memory operand must be a %cs reference. The data
++       * segments may not be usable (vm86 mode), and the stack segment may not
++       * be flat (ESPFIX32).
++       */
++      ALTERNATIVE "", "verw %cs:mds_verw_sel", X86_FEATURE_CLEAR_CPU_BUF
++#endif
+ .endm
+ 
+ #ifdef CONFIG_X86_64
+diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c
+index 373638691cd480..3244ab43fff998 100644
+--- a/arch/x86/kernel/apic/apic.c
++++ b/arch/x86/kernel/apic/apic.c
+@@ -440,7 +440,19 @@ static int lapic_timer_shutdown(struct clock_event_device *evt)
+       v = apic_read(APIC_LVTT);
+       v |= (APIC_LVT_MASKED | LOCAL_TIMER_VECTOR);
+       apic_write(APIC_LVTT, v);
+-      apic_write(APIC_TMICT, 0);
++
++      /*
++       * Setting APIC_LVT_MASKED (above) should be enough to tell
++       * the hardware that this timer will never fire. But AMD
++       * erratum 411 and some Intel CPU behavior circa 2024 say
++       * otherwise.  Time for belt and suspenders programming: mask
++       * the timer _and_ zero the counter registers:
++       */
++      if (v & APIC_LVT_TIMER_TSCDEADLINE)
++              wrmsrl(MSR_IA32_TSC_DEADLINE, 0);
++      else
++              apic_write(APIC_TMICT, 0);
++
+       return 0;
+ }
+ 
+diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
+index 1e0fe5f8ab84e4..f01b72052f7908 100644
+--- a/arch/x86/kernel/cpu/amd.c
++++ b/arch/x86/kernel/cpu/amd.c
+@@ -1218,5 +1218,6 @@ void amd_check_microcode(void)
+       if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD)
+               return;
+ 
+-      on_each_cpu(zenbleed_check_cpu, NULL, 1);
++      if (cpu_feature_enabled(X86_FEATURE_ZEN2))
++              on_each_cpu(zenbleed_check_cpu, NULL, 1);
+ }
+diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c
+index 468449f73a9575..2ef649ec32ce5e 100644
+--- a/arch/x86/kernel/cpu/bugs.c
++++ b/arch/x86/kernel/cpu/bugs.c
+@@ -1113,8 +1113,25 @@ static void __init retbleed_select_mitigation(void)
+ 
+       case RETBLEED_MITIGATION_IBPB:
+               setup_force_cpu_cap(X86_FEATURE_ENTRY_IBPB);
++
++              /*
++               * IBPB on entry already obviates the need for
++               * software-based untraining so clear those in case some
++               * other mitigation like SRSO has selected them.
++               */
++              setup_clear_cpu_cap(X86_FEATURE_UNRET);
++              setup_clear_cpu_cap(X86_FEATURE_RETHUNK);
++
+               setup_force_cpu_cap(X86_FEATURE_IBPB_ON_VMEXIT);
+               mitigate_smt = true;
++
++              /*
++               * There is no need for RSB filling: entry_ibpb() ensures
++               * all predictions, including the RSB, are invalidated,
++               * regardless of IBPB implementation.
++               */
++              setup_clear_cpu_cap(X86_FEATURE_RSB_VMEXIT);
++
+               break;
+ 
+       case RETBLEED_MITIGATION_STUFF:
+@@ -2621,6 +2638,14 @@ static void __init srso_select_mitigation(void)
+                       if (has_microcode) {
+                               setup_force_cpu_cap(X86_FEATURE_ENTRY_IBPB);
+                               srso_mitigation = SRSO_MITIGATION_IBPB;
++
++                              /*
++                               * IBPB on entry already obviates the need for
++               * software-based untraining so clear those in case some
++               * other mitigation like Retbleed has selected them.
++                               */
++                              setup_clear_cpu_cap(X86_FEATURE_UNRET);
++                              setup_clear_cpu_cap(X86_FEATURE_RETHUNK);
+                       }
+               } else {
+                       pr_err("WARNING: kernel not compiled with 
MITIGATION_IBPB_ENTRY.\n");
+@@ -2632,6 +2657,13 @@ static void __init srso_select_mitigation(void)
+                       if (!boot_cpu_has(X86_FEATURE_ENTRY_IBPB) && has_microcode) {
+                               setup_force_cpu_cap(X86_FEATURE_IBPB_ON_VMEXIT);
+                               srso_mitigation = SRSO_MITIGATION_IBPB_ON_VMEXIT;
++
++                              /*
++                               * There is no need for RSB filling: entry_ibpb() ensures
++                               * all predictions, including the RSB, are invalidated,
++                               * regardless of IBPB implementation.
++                               */
++                              setup_clear_cpu_cap(X86_FEATURE_RSB_VMEXIT);
+                       }
+               } else {
+                       pr_err("WARNING: kernel not compiled with 
MITIGATION_SRSO.\n");
+diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
+index be307c9ef263d8..ab0e2da7c9ef50 100644
+--- a/arch/x86/kernel/cpu/common.c
++++ b/arch/x86/kernel/cpu/common.c
+@@ -1443,6 +1443,9 @@ static void __init cpu_set_bug_bits(struct cpuinfo_x86 *c)
+            boot_cpu_has(X86_FEATURE_HYPERVISOR)))
+               setup_force_cpu_bug(X86_BUG_BHI);
+ 
++      if (cpu_has(c, X86_FEATURE_AMD_IBPB) && !cpu_has(c, X86_FEATURE_AMD_IBPB_RET))
++              setup_force_cpu_bug(X86_BUG_IBPB_NO_RET);
++
+       if (cpu_matches(cpu_vuln_whitelist, NO_MELTDOWN))
+               return;
+ 
+diff --git a/arch/x86/kernel/cpu/resctrl/core.c b/arch/x86/kernel/cpu/resctrl/core.c
+index 8591d53c144bb1..b681c2e07dbf84 100644
+--- a/arch/x86/kernel/cpu/resctrl/core.c
++++ b/arch/x86/kernel/cpu/resctrl/core.c
+@@ -207,7 +207,7 @@ static inline bool rdt_get_mb_table(struct rdt_resource *r)
+       return false;
+ }
+ 
+-static bool __get_mem_config_intel(struct rdt_resource *r)
++static __init bool __get_mem_config_intel(struct rdt_resource *r)
+ {
+       struct rdt_hw_resource *hw_res = resctrl_to_arch_res(r);
+       union cpuid_0x10_3_eax eax;
+@@ -241,7 +241,7 @@ static bool __get_mem_config_intel(struct rdt_resource *r)
+       return true;
+ }
+ 
+-static bool __rdt_get_mem_config_amd(struct rdt_resource *r)
++static __init bool __rdt_get_mem_config_amd(struct rdt_resource *r)
+ {
+       struct rdt_hw_resource *hw_res = resctrl_to_arch_res(r);
+       u32 eax, ebx, ecx, edx, subleaf;
+diff --git a/block/blk-mq.c b/block/blk-mq.c
+index e3c3c0c21b5536..b56a1c0dd13878 100644
+--- a/block/blk-mq.c
++++ b/block/blk-mq.c
+@@ -4307,6 +4307,12 @@ int blk_mq_init_allocated_queue(struct blk_mq_tag_set *set,
+       /* mark the queue as mq asap */
+       q->mq_ops = set->ops;
+ 
++      /*
++       * ->tag_set has to be setup before initialize hctx, which cpuphp
++       * handler needs it for checking queue mapping
++       */
++      q->tag_set = set;
++
+       if (blk_mq_alloc_ctxs(q))
+               goto err_exit;
+ 
+@@ -4325,8 +4331,6 @@ int blk_mq_init_allocated_queue(struct blk_mq_tag_set *set,
+       INIT_WORK(&q->timeout_work, blk_mq_timeout_work);
+       blk_queue_rq_timeout(q, set->timeout ? set->timeout : 30 * HZ);
+ 
+-      q->tag_set = set;
+-
+       q->queue_flags |= QUEUE_FLAG_MQ_DEFAULT;
+ 
+       INIT_DELAYED_WORK(&q->requeue_work, blk_mq_requeue_work);
+diff --git a/block/blk-rq-qos.c b/block/blk-rq-qos.c
+index dd7310c94713c9..dc510f493ba572 100644
+--- a/block/blk-rq-qos.c
++++ b/block/blk-rq-qos.c
+@@ -219,8 +219,8 @@ static int rq_qos_wake_function(struct wait_queue_entry *curr,
+ 
+       data->got_token = true;
+       smp_wmb();
+-      list_del_init(&curr->entry);
+       wake_up_process(data->task);
++      list_del_init_careful(&curr->entry);
+       return 1;
+ }
+ 
+diff --git a/drivers/block/ublk_drv.c b/drivers/block/ublk_drv.c
+index bca06bfb4bc32f..2633f7356fac72 100644
+--- a/drivers/block/ublk_drv.c
++++ b/drivers/block/ublk_drv.c
+@@ -2381,10 +2381,19 @@ static int ublk_ctrl_add_dev(struct io_uring_cmd *cmd)
+        * TODO: provide forward progress for RECOVERY handler, so that
+        * unprivileged device can benefit from it
+        */
+-      if (info.flags & UBLK_F_UNPRIVILEGED_DEV)
++      if (info.flags & UBLK_F_UNPRIVILEGED_DEV) {
+               info.flags &= ~(UBLK_F_USER_RECOVERY_REISSUE |
+                               UBLK_F_USER_RECOVERY);
+ 
++              /*
++               * For USER_COPY, we depends on userspace to fill request
++               * buffer by pwrite() to ublk char device, which can't be
++               * used for unprivileged device
++               */
++              if (info.flags & UBLK_F_USER_COPY)
++                      return -EINVAL;
++      }
++
+       /* the created device is always owned by current user */
+       ublk_store_owner_uid_gid(&info.owner_uid, &info.owner_gid);
+ 
+diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
+index a1e9b052bc8476..2408e50743ca64 100644
+--- a/drivers/bluetooth/btusb.c
++++ b/drivers/bluetooth/btusb.c
+@@ -1399,10 +1399,15 @@ static int btusb_submit_intr_urb(struct hci_dev *hdev, gfp_t mem_flags)
+       if (!urb)
+               return -ENOMEM;
+ 
+-      /* Use maximum HCI Event size so the USB stack handles
+-       * ZPL/short-transfer automatically.
+-       */
+-      size = HCI_MAX_EVENT_SIZE;
++      if (le16_to_cpu(data->udev->descriptor.idVendor)  == 0x0a12 &&
++          le16_to_cpu(data->udev->descriptor.idProduct) == 0x0001)
++              /* Fake CSR devices don't seem to support short transfers */
++              size = le16_to_cpu(data->intr_ep->wMaxPacketSize);
++      else
++              /* Use maximum HCI Event size so the USB stack handles
++               * ZPL/short-transfer automatically.
++               */
++              size = HCI_MAX_EVENT_SIZE;
+ 
+       buf = kmalloc(size, mem_flags);
+       if (!buf) {
+@@ -4092,7 +4097,6 @@ static void btusb_disconnect(struct usb_interface *intf)
+ static int btusb_suspend(struct usb_interface *intf, pm_message_t message)
+ {
+       struct btusb_data *data = usb_get_intfdata(intf);
+-      int err;
+ 
+       BT_DBG("intf %p", intf);
+ 
+@@ -4105,16 +4109,6 @@ static int btusb_suspend(struct usb_interface *intf, pm_message_t message)
+       if (data->suspend_count++)
+               return 0;
+ 
+-      /* Notify Host stack to suspend; this has to be done before stopping
+-       * the traffic since the hci_suspend_dev itself may generate some
+-       * traffic.
+-       */
+-      err = hci_suspend_dev(data->hdev);
+-      if (err) {
+-              data->suspend_count--;
+-              return err;
+-      }
+-
+       spin_lock_irq(&data->txlock);
+       if (!(PMSG_IS_AUTO(message) && data->tx_in_flight)) {
+               set_bit(BTUSB_SUSPENDING, &data->flags);
+@@ -4122,7 +4116,6 @@ static int btusb_suspend(struct usb_interface *intf, pm_message_t message)
+       } else {
+               spin_unlock_irq(&data->txlock);
+               data->suspend_count--;
+-              hci_resume_dev(data->hdev);
+               return -EBUSY;
+       }
+ 
+@@ -4243,8 +4236,6 @@ static int btusb_resume(struct usb_interface *intf)
+       spin_unlock_irq(&data->txlock);
+       schedule_work(&data->work);
+ 
+-      hci_resume_dev(data->hdev);
+-
+       return 0;
+ 
+ failed:
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+index 78b3c067fea7e2..a95811da242b55 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+@@ -265,7 +265,7 @@ static int amdgpu_cs_pass1(struct amdgpu_cs_parser *p,
+ 
+                       /* Only a single BO list is allowed to simplify handling. */
+                       if (p->bo_list)
+-                              ret = -EINVAL;
++                              goto free_partial_kdata;
+ 
+                       ret = amdgpu_cs_p1_bo_handles(p, p->chunks[i].kdata);
+                       if (ret)
+diff --git a/drivers/gpu/drm/amd/amdgpu/mes_v12_0.c b/drivers/gpu/drm/amd/amdgpu/mes_v12_0.c
+index 48b3c4e4b1cad8..62d792ed0323aa 100644
+--- a/drivers/gpu/drm/amd/amdgpu/mes_v12_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/mes_v12_0.c
+@@ -595,7 +595,7 @@ static int mes_v12_0_set_hw_resources(struct amdgpu_mes *mes, int pipe)
+ 
+       if (amdgpu_mes_log_enable) {
+               mes_set_hw_res_pkt.enable_mes_event_int_logging = 1;
+-              mes_set_hw_res_pkt.event_intr_history_gpu_mc_ptr = mes->event_log_gpu_addr;
++              mes_set_hw_res_pkt.event_intr_history_gpu_mc_ptr = mes->event_log_gpu_addr + pipe * AMDGPU_MES_LOG_BUFFER_SIZE;
+       }
+ 
+       return mes_v12_0_submit_pkt_and_poll_completion(mes, pipe,
+@@ -1270,7 +1270,7 @@ static int mes_v12_0_sw_init(void *handle)
+       adev->mes.kiq_hw_fini = &mes_v12_0_kiq_hw_fini;
+       adev->mes.enable_legacy_queue_map = true;
+ 
+-      adev->mes.event_log_size = AMDGPU_MES_LOG_BUFFER_SIZE;
++      adev->mes.event_log_size = adev->enable_uni_mes ? (AMDGPU_MAX_MES_PIPES * AMDGPU_MES_LOG_BUFFER_SIZE) : AMDGPU_MES_LOG_BUFFER_SIZE;
+ 
+       r = amdgpu_mes_init(adev);
+       if (r)
+diff --git a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
+index 2cf95118456182..87672ca714de5b 100644
+--- a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
++++ b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
+@@ -2226,7 +2226,7 @@ static int smu_bump_power_profile_mode(struct smu_context *smu,
+ static int smu_adjust_power_state_dynamic(struct smu_context *smu,
+                                         enum amd_dpm_forced_level level,
+                                         bool skip_display_settings,
+-                                        bool force_update)
++                                        bool init)
+ {
+       int ret = 0;
+       int index = 0;
+@@ -2255,7 +2255,7 @@ static int smu_adjust_power_state_dynamic(struct smu_context *smu,
+               }
+       }
+ 
+-      if (force_update || smu_dpm_ctx->dpm_level != level) {
++      if (smu_dpm_ctx->dpm_level != level) {
+               ret = smu_asic_set_performance_level(smu, level);
+               if (ret) {
+                       dev_err(smu->adev->dev, "Failed to set performance level!");
+@@ -2272,7 +2272,7 @@ static int smu_adjust_power_state_dynamic(struct smu_context *smu,
+               index = index > 0 && index <= WORKLOAD_POLICY_MAX ? index - 1 : 0;
+               workload[0] = smu->workload_setting[index];
+ 
+-              if (force_update || smu->power_profile_mode != workload[0])
++              if (init || smu->power_profile_mode != workload[0])
+                       smu_bump_power_profile_mode(smu, workload, 0);
+       }
+ 
+diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c
+index 1d024b122b0c02..cb923e33fd6fc7 100644
+--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c
++++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c
+@@ -2555,18 +2555,16 @@ static int smu_v13_0_0_set_power_profile_mode(struct smu_context *smu,
+       workload_mask = 1 << workload_type;
+ 
+       /* Add optimizations for SMU13.0.0/10.  Reuse the power saving profile */
+-      if (smu->power_profile_mode == PP_SMC_POWER_PROFILE_COMPUTE) {
+-              if ((amdgpu_ip_version(smu->adev, MP1_HWIP, 0) == IP_VERSION(13, 0, 0) &&
+-                      ((smu->adev->pm.fw_version == 0x004e6601) ||
+-                      (smu->adev->pm.fw_version >= 0x004e7300))) ||
+-                      (amdgpu_ip_version(smu->adev, MP1_HWIP, 0) == IP_VERSION(13, 0, 10) &&
+-                       smu->adev->pm.fw_version >= 0x00504500)) {
+-                      workload_type = smu_cmn_to_asic_specific_index(smu,
+-                                                              CMN2ASIC_MAPPING_WORKLOAD,
+-                                                              PP_SMC_POWER_PROFILE_POWERSAVING);
+-                      if (workload_type >= 0)
+-                              workload_mask |= 1 << workload_type;
+-              }
++      if ((amdgpu_ip_version(smu->adev, MP1_HWIP, 0) == IP_VERSION(13, 0, 0) &&
++           ((smu->adev->pm.fw_version == 0x004e6601) ||
++            (smu->adev->pm.fw_version >= 0x004e7300))) ||
++          (amdgpu_ip_version(smu->adev, MP1_HWIP, 0) == IP_VERSION(13, 0, 10) &&
++           smu->adev->pm.fw_version >= 0x00504500)) {
++              workload_type = smu_cmn_to_asic_specific_index(smu,
++                                                             CMN2ASIC_MAPPING_WORKLOAD,
++                                                             PP_SMC_POWER_PROFILE_POWERSAVING);
++              if (workload_type >= 0)
++                      workload_mask |= 1 << workload_type;
+       }
+ 
+       ret = smu_cmn_send_smc_msg_with_param(smu,
+diff --git a/drivers/gpu/drm/i915/display/intel_dp_mst.c b/drivers/gpu/drm/i915/display/intel_dp_mst.c
+index 17978a1f9ab0a0..baaa331fbcfa06 100644
+--- a/drivers/gpu/drm/i915/display/intel_dp_mst.c
++++ b/drivers/gpu/drm/i915/display/intel_dp_mst.c
+@@ -88,25 +88,19 @@ static int intel_dp_mst_max_dpt_bpp(const struct intel_crtc_state *crtc_state,
+ 
+ static int intel_dp_mst_bw_overhead(const struct intel_crtc_state *crtc_state,
+                                   const struct intel_connector *connector,
+-                                  bool ssc, bool dsc, int bpp_x16)
++                                  bool ssc, int dsc_slice_count, int bpp_x16)
+ {
+       const struct drm_display_mode *adjusted_mode =
+               &crtc_state->hw.adjusted_mode;
+       unsigned long flags = DRM_DP_BW_OVERHEAD_MST;
+-      int dsc_slice_count = 0;
+       int overhead;
+ 
+       flags |= intel_dp_is_uhbr(crtc_state) ? DRM_DP_BW_OVERHEAD_UHBR : 0;
+       flags |= ssc ? DRM_DP_BW_OVERHEAD_SSC_REF_CLK : 0;
+       flags |= crtc_state->fec_enable ? DRM_DP_BW_OVERHEAD_FEC : 0;
+ 
+-      if (dsc) {
++      if (dsc_slice_count)
+               flags |= DRM_DP_BW_OVERHEAD_DSC;
+-              dsc_slice_count = intel_dp_dsc_get_slice_count(connector,
+-                                                             adjusted_mode->clock,
+-                                                             adjusted_mode->hdisplay,
+-                                                             crtc_state->joiner_pipes);
+-      }
+ 
+       overhead = drm_dp_bw_overhead(crtc_state->lane_count,
+                                     adjusted_mode->hdisplay,
+@@ -152,6 +146,19 @@ static int intel_dp_mst_calc_pbn(int pixel_clock, int bpp_x16, int bw_overhead)
+       return DIV_ROUND_UP(effective_data_rate * 64, 54 * 1000);
+ }
+ 
++static int intel_dp_mst_dsc_get_slice_count(const struct intel_connector *connector,
++                                          const struct intel_crtc_state *crtc_state)
++{
++      const struct drm_display_mode *adjusted_mode =
++              &crtc_state->hw.adjusted_mode;
++      int num_joined_pipes = crtc_state->joiner_pipes;
++
++      return intel_dp_dsc_get_slice_count(connector,
++                                          adjusted_mode->clock,
++                                          adjusted_mode->hdisplay,
++                                          num_joined_pipes);
++}
++
+ static int intel_dp_mst_find_vcpi_slots_for_bpp(struct intel_encoder *encoder,
+                                               struct intel_crtc_state *crtc_state,
+                                               int max_bpp,
+@@ -171,6 +178,7 @@ static int intel_dp_mst_find_vcpi_slots_for_bpp(struct intel_encoder *encoder,
+       const struct drm_display_mode *adjusted_mode =
+               &crtc_state->hw.adjusted_mode;
+       int bpp, slots = -EINVAL;
++      int dsc_slice_count = 0;
+       int max_dpt_bpp;
+       int ret = 0;
+ 
+@@ -202,6 +210,15 @@ static int intel_dp_mst_find_vcpi_slots_for_bpp(struct intel_encoder *encoder,
+       drm_dbg_kms(&i915->drm, "Looking for slots in range min bpp %d max bpp %d\n",
+                   min_bpp, max_bpp);
+ 
++      if (dsc) {
++              dsc_slice_count = intel_dp_mst_dsc_get_slice_count(connector, crtc_state);
++              if (!dsc_slice_count) {
++                      drm_dbg_kms(&i915->drm, "Can't get valid DSC slice count\n");
++
++                      return -ENOSPC;
++              }
++      }
++
+       for (bpp = max_bpp; bpp >= min_bpp; bpp -= step) {
+               int local_bw_overhead;
+               int remote_bw_overhead;
+@@ -215,9 +232,9 @@ static int intel_dp_mst_find_vcpi_slots_for_bpp(struct intel_encoder *encoder,
+                                         intel_dp_output_bpp(crtc_state->output_format, bpp));
+ 
+               local_bw_overhead = intel_dp_mst_bw_overhead(crtc_state, connector,
+-                                                           false, dsc, link_bpp_x16);
++                                                           false, dsc_slice_count, link_bpp_x16);
+               remote_bw_overhead = intel_dp_mst_bw_overhead(crtc_state, connector,
+-                                                            true, dsc, link_bpp_x16);
++                                                            true, dsc_slice_count, link_bpp_x16);
+ 
+               intel_dp_mst_compute_m_n(crtc_state, connector,
+                                        local_bw_overhead,
+@@ -448,6 +465,9 @@ hblank_expansion_quirk_needs_dsc(const struct intel_connector *connector,
+       if (mode_hblank_period_ns(adjusted_mode) > hblank_limit)
+               return false;
+ 
++      if (!intel_dp_mst_dsc_get_slice_count(connector, crtc_state))
++              return false;
++
+       return true;
+ }
+ 
+diff --git a/drivers/gpu/drm/radeon/radeon_encoders.c b/drivers/gpu/drm/radeon/radeon_encoders.c
+index 0f723292409e5a..fafed331e0a03e 100644
+--- a/drivers/gpu/drm/radeon/radeon_encoders.c
++++ b/drivers/gpu/drm/radeon/radeon_encoders.c
+@@ -43,7 +43,7 @@ static uint32_t radeon_encoder_clones(struct drm_encoder *encoder)
+       struct radeon_device *rdev = dev->dev_private;
+       struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+       struct drm_encoder *clone_encoder;
+-      uint32_t index_mask = 0;
++      uint32_t index_mask = drm_encoder_mask(encoder);
+       int count;
+ 
+       /* DIG routing gets problematic */
+diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
+index 288ed0bb75cb98..aec624196d6ea7 100644
+--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
+@@ -1283,7 +1283,6 @@ static int vmw_kms_new_framebuffer_surface(struct vmw_private *dev_priv,
+ {
+       struct drm_device *dev = &dev_priv->drm;
+       struct vmw_framebuffer_surface *vfbs;
+-      enum SVGA3dSurfaceFormat format;
+       struct vmw_surface *surface;
+       int ret;
+ 
+@@ -1320,34 +1319,6 @@ static int vmw_kms_new_framebuffer_surface(struct vmw_private *dev_priv,
+               return -EINVAL;
+       }
+ 
+-      switch (mode_cmd->pixel_format) {
+-      case DRM_FORMAT_ARGB8888:
+-              format = SVGA3D_A8R8G8B8;
+-              break;
+-      case DRM_FORMAT_XRGB8888:
+-              format = SVGA3D_X8R8G8B8;
+-              break;
+-      case DRM_FORMAT_RGB565:
+-              format = SVGA3D_R5G6B5;
+-              break;
+-      case DRM_FORMAT_XRGB1555:
+-              format = SVGA3D_A1R5G5B5;
+-              break;
+-      default:
+-              DRM_ERROR("Invalid pixel format: %p4cc\n",
+-                        &mode_cmd->pixel_format);
+-              return -EINVAL;
+-      }
+-
+-      /*
+-       * For DX, surface format validation is done when surface->scanout
+-       * is set.
+-       */
+-      if (!has_sm4_context(dev_priv) && format != surface->metadata.format) {
+-              DRM_ERROR("Invalid surface format for requested mode.\n");
+-              return -EINVAL;
+-      }
+-
+       vfbs = kzalloc(sizeof(*vfbs), GFP_KERNEL);
+       if (!vfbs) {
+               ret = -ENOMEM;
+@@ -1539,6 +1510,7 @@ static struct drm_framebuffer *vmw_kms_fb_create(struct drm_device *dev,
+               DRM_ERROR("Surface size cannot exceed %dx%d\n",
+                       dev_priv->texture_max_width,
+                       dev_priv->texture_max_height);
++              ret = -EINVAL;
+               goto err_out;
+       }
+ 
+diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
+index 1625b30d997004..5721c74da3e0b9 100644
+--- a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
+@@ -2276,9 +2276,12 @@ int vmw_dumb_create(struct drm_file *file_priv,
+       const struct SVGA3dSurfaceDesc *desc = vmw_surface_get_desc(format);
+       SVGA3dSurfaceAllFlags flags = SVGA3D_SURFACE_HINT_TEXTURE |
+                                     SVGA3D_SURFACE_HINT_RENDERTARGET |
+-                                    SVGA3D_SURFACE_SCREENTARGET |
+-                                    SVGA3D_SURFACE_BIND_SHADER_RESOURCE |
+-                                    SVGA3D_SURFACE_BIND_RENDER_TARGET;
++                                    SVGA3D_SURFACE_SCREENTARGET;
++
++      if (vmw_surface_is_dx_screen_target_format(format)) {
++              flags |= SVGA3D_SURFACE_BIND_SHADER_RESOURCE |
++                       SVGA3D_SURFACE_BIND_RENDER_TARGET;
++      }
+ 
+       /*
+        * Without mob support we're just going to use raw memory buffer
+diff --git a/drivers/gpu/drm/xe/xe_sync.c b/drivers/gpu/drm/xe/xe_sync.c
+index 80499681bd583d..de80c8b7c8913c 100644
+--- a/drivers/gpu/drm/xe/xe_sync.c
++++ b/drivers/gpu/drm/xe/xe_sync.c
+@@ -58,7 +58,7 @@ static struct xe_user_fence *user_fence_create(struct xe_device *xe, u64 addr,
+       if (!access_ok(ptr, sizeof(*ptr)))
+               return ERR_PTR(-EFAULT);
+ 
+-      ufence = kmalloc(sizeof(*ufence), GFP_KERNEL);
++      ufence = kzalloc(sizeof(*ufence), GFP_KERNEL);
+       if (!ufence)
+               return ERR_PTR(-ENOMEM);
+ 
+diff --git a/drivers/gpu/drm/xe/xe_wait_user_fence.c b/drivers/gpu/drm/xe/xe_wait_user_fence.c
+index f69721339201d6..92f65b9c528015 100644
+--- a/drivers/gpu/drm/xe/xe_wait_user_fence.c
++++ b/drivers/gpu/drm/xe/xe_wait_user_fence.c
+@@ -169,9 +169,6 @@ int xe_wait_user_fence_ioctl(struct drm_device *dev, void *data,
+                       args->timeout = 0;
+       }
+ 
+-      if (!timeout && !(err < 0))
+-              err = -ETIME;
+-
+       if (q)
+               xe_exec_queue_put(q);
+ 
+diff --git a/drivers/iio/accel/Kconfig b/drivers/iio/accel/Kconfig
+index 80b57d3ee3a726..c5b1db001d6f33 100644
+--- a/drivers/iio/accel/Kconfig
++++ b/drivers/iio/accel/Kconfig
+@@ -420,6 +420,8 @@ config IIO_ST_ACCEL_SPI_3AXIS
+ 
+ config IIO_KX022A
+       tristate
++      select IIO_BUFFER
++      select IIO_TRIGGERED_BUFFER
+ 
+ config IIO_KX022A_SPI
+       tristate "Kionix KX022A tri-axis digital accelerometer SPI interface"
+diff --git a/drivers/iio/adc/Kconfig b/drivers/iio/adc/Kconfig
+index f60fe85a30d529..cceac30e2bb9f9 100644
+--- a/drivers/iio/adc/Kconfig
++++ b/drivers/iio/adc/Kconfig
+@@ -52,6 +52,8 @@ config AD7091R8
+       depends on SPI
+       select AD7091R
+       select REGMAP_SPI
++      select IIO_BUFFER
++      select IIO_TRIGGERED_BUFFER
+       help
+         Say yes here to build support for Analog Devices AD7091R-2, AD7091R-4,
+         and AD7091R-8 ADC.
+@@ -305,6 +307,8 @@ config AD7923
+ config AD7944
+       tristate "Analog Devices AD7944 and similar ADCs driver"
+       depends on SPI
++      select IIO_BUFFER
++      select IIO_TRIGGERED_BUFFER
+       help
+         Say yes here to build support for Analog Devices
+         AD7944, AD7985, AD7986 ADCs.
+@@ -1433,6 +1437,8 @@ config TI_ADS8344
+ config TI_ADS8688
+       tristate "Texas Instruments ADS8688"
+       depends on SPI
++      select IIO_BUFFER
++      select IIO_TRIGGERED_BUFFER
+       help
+         If you say yes here you get support for Texas Instruments ADS8684 and
+         and ADS8688 ADC chips
+@@ -1443,6 +1449,8 @@ config TI_ADS8688
+ config TI_ADS124S08
+       tristate "Texas Instruments ADS124S08"
+       depends on SPI
++      select IIO_BUFFER
++      select IIO_TRIGGERED_BUFFER
+       help
+         If you say yes here you get support for Texas Instruments ADS124S08
+         and ADS124S06 ADC chips
+@@ -1477,6 +1485,7 @@ config TI_AM335X_ADC
+ config TI_LMP92064
+       tristate "Texas Instruments LMP92064 ADC driver"
+       depends on SPI
++      select REGMAP_SPI
+       help
+         Say yes here to build support for the LMP92064 Precision Current and Voltage
+         sensor.
+diff --git a/drivers/iio/amplifiers/Kconfig b/drivers/iio/amplifiers/Kconfig
+index b54fe01734b0d7..55eb16b32f6c9a 100644
+--- a/drivers/iio/amplifiers/Kconfig
++++ b/drivers/iio/amplifiers/Kconfig
+@@ -27,6 +27,7 @@ config AD8366
+ config ADA4250
+       tristate "Analog Devices ADA4250 Instrumentation Amplifier"
+       depends on SPI
++      select REGMAP_SPI
+       help
+         Say yes here to build support for Analog Devices ADA4250
+         SPI Amplifier's support. The driver provides direct access via
+diff --git a/drivers/iio/chemical/Kconfig b/drivers/iio/chemical/Kconfig
+index 678a6adb9a7583..6c87223f58d903 100644
+--- a/drivers/iio/chemical/Kconfig
++++ b/drivers/iio/chemical/Kconfig
+@@ -80,6 +80,8 @@ config ENS160
+       tristate "ScioSense ENS160 sensor driver"
+       depends on (I2C || SPI)
+       select REGMAP
++      select IIO_BUFFER
++      select IIO_TRIGGERED_BUFFER
+       select ENS160_I2C if I2C
+       select ENS160_SPI if SPI
+       help
+diff --git a/drivers/iio/common/hid-sensors/hid-sensor-trigger.c b/drivers/iio/common/hid-sensors/hid-sensor-trigger.c
+index ad8910e6ad59df..abb09fefc792c5 100644
+--- a/drivers/iio/common/hid-sensors/hid-sensor-trigger.c
++++ b/drivers/iio/common/hid-sensors/hid-sensor-trigger.c
+@@ -32,7 +32,7 @@ static ssize_t _hid_sensor_set_report_latency(struct device *dev,
+       latency = integer * 1000 + fract / 1000;
+       ret = hid_sensor_set_report_latency(attrb, latency);
+       if (ret < 0)
+-              return len;
++              return ret;
+ 
+       attrb->latency_ms = hid_sensor_get_report_latency(attrb);
+ 
+diff --git a/drivers/iio/dac/Kconfig b/drivers/iio/dac/Kconfig
+index a2596c2d3de316..d2012c91dea8b2 100644
+--- a/drivers/iio/dac/Kconfig
++++ b/drivers/iio/dac/Kconfig
+@@ -9,6 +9,8 @@ menu "Digital to analog converters"
+ config AD3552R
+       tristate "Analog Devices AD3552R DAC driver"
+       depends on SPI_MASTER
++      select IIO_BUFFER
++      select IIO_TRIGGERED_BUFFER
+       help
+         Say yes here to build support for Analog Devices AD3552R
+         Digital to Analog Converter.
+@@ -252,6 +254,8 @@ config AD5764
+ config AD5766
+       tristate "Analog Devices AD5766/AD5767 DAC driver"
+       depends on SPI_MASTER
++      select IIO_BUFFER
++      select IIO_TRIGGERED_BUFFER
+       help
+         Say yes here to build support for Analog Devices AD5766, AD5767
+         Digital to Analog Converter.
+@@ -262,6 +266,7 @@ config AD5766
+ config AD5770R
+       tristate "Analog Devices AD5770R IDAC driver"
+       depends on SPI_MASTER
++      select REGMAP_SPI
+       help
+         Say yes here to build support for Analog Devices AD5770R Digital to
+         Analog Converter.
+@@ -353,6 +358,7 @@ config LPC18XX_DAC
+ config LTC1660
+       tristate "Linear Technology LTC1660/LTC1665 DAC SPI driver"
+       depends on SPI
++      select REGMAP_SPI
+       help
+         Say yes here to build support for Linear Technology
+         LTC1660 and LTC1665 Digital to Analog Converters.
+@@ -472,6 +478,7 @@ config STM32_DAC
+ 
+ config STM32_DAC_CORE
+       tristate
++      select REGMAP_MMIO
+ 
+ config TI_DAC082S085
+       tristate "Texas Instruments 8/10/12-bit 2/4-channel DAC driver"
+diff --git a/drivers/iio/frequency/Kconfig b/drivers/iio/frequency/Kconfig
+index c455be7d4a1c88..89ae09db5ca5fc 100644
+--- a/drivers/iio/frequency/Kconfig
++++ b/drivers/iio/frequency/Kconfig
+@@ -53,6 +53,7 @@ config ADF4371
+ config ADF4377
+       tristate "Analog Devices ADF4377 Microwave Wideband Synthesizer"
+       depends on SPI && COMMON_CLK
++      select REGMAP_SPI
+       help
+         Say yes here to build support for Analog Devices ADF4377 Microwave
+         Wideband Synthesizer.
+diff --git a/drivers/iio/light/Kconfig b/drivers/iio/light/Kconfig
+index b68dcc1fbaca4c..c63fe9228ddba8 100644
+--- a/drivers/iio/light/Kconfig
++++ b/drivers/iio/light/Kconfig
+@@ -322,6 +322,8 @@ config ROHM_BU27008
+       depends on I2C
+       select REGMAP_I2C
+       select IIO_GTS_HELPER
++      select IIO_BUFFER
++      select IIO_TRIGGERED_BUFFER
+       help
+         Enable support for the ROHM BU27008 color sensor.
+         The ROHM BU27008 is a sensor with 5 photodiodes (red, green,
+diff --git a/drivers/iio/light/opt3001.c b/drivers/iio/light/opt3001.c
+index 887c4b776a8696..176e54bb48c33b 100644
+--- a/drivers/iio/light/opt3001.c
++++ b/drivers/iio/light/opt3001.c
+@@ -138,6 +138,10 @@ static const struct opt3001_scale opt3001_scales[] = {
+               .val = 20966,
+               .val2 = 400000,
+       },
++      {
++              .val = 41932,
++              .val2 = 800000,
++      },
+       {
+               .val = 83865,
+               .val2 = 600000,
+diff --git a/drivers/iio/light/veml6030.c b/drivers/iio/light/veml6030.c
+index 2e86d310952ede..9630de1c578ecb 100644
+--- a/drivers/iio/light/veml6030.c
++++ b/drivers/iio/light/veml6030.c
+@@ -99,9 +99,8 @@ static const char * const period_values[] = {
+ static ssize_t in_illuminance_period_available_show(struct device *dev,
+                               struct device_attribute *attr, char *buf)
+ {
++      struct veml6030_data *data = iio_priv(dev_to_iio_dev(dev));
+       int ret, reg, x;
+-      struct iio_dev *indio_dev = i2c_get_clientdata(to_i2c_client(dev));
+-      struct veml6030_data *data = iio_priv(indio_dev);
+ 
+       ret = regmap_read(data->regmap, VEML6030_REG_ALS_CONF, &reg);
+       if (ret) {
+@@ -780,7 +779,7 @@ static int veml6030_hw_init(struct iio_dev *indio_dev)
+ 
+       /* Cache currently active measurement parameters */
+       data->cur_gain = 3;
+-      data->cur_resolution = 4608;
++      data->cur_resolution = 5376;
+       data->cur_integration_time = 3;
+ 
+       return ret;
+diff --git a/drivers/iio/magnetometer/Kconfig b/drivers/iio/magnetometer/Kconfig
+index cd2917d719047b..8d076fdd5f5db4 100644
+--- a/drivers/iio/magnetometer/Kconfig
++++ b/drivers/iio/magnetometer/Kconfig
+@@ -11,6 +11,8 @@ config AF8133J
+       depends on I2C
+       depends on OF
+       select REGMAP_I2C
++      select IIO_BUFFER
++      select IIO_TRIGGERED_BUFFER
+       help
+         Say yes here to build support for Voltafield AF8133J I2C-based
+         3-axis magnetometer chip.
+diff --git a/drivers/iio/pressure/Kconfig b/drivers/iio/pressure/Kconfig
+index 3ad38506028ef0..346dace9d651de 100644
+--- a/drivers/iio/pressure/Kconfig
++++ b/drivers/iio/pressure/Kconfig
+@@ -19,6 +19,9 @@ config ABP060MG
+ config ROHM_BM1390
+       tristate "ROHM BM1390GLV-Z pressure sensor driver"
+       depends on I2C
++      select REGMAP_I2C
++      select IIO_BUFFER
++      select IIO_TRIGGERED_BUFFER
+       help
+         Support for the ROHM BM1390 pressure sensor. The BM1390GLV-Z
+         can measure pressures ranging from 300 hPa to 1300 hPa with
+diff --git a/drivers/iio/proximity/Kconfig b/drivers/iio/proximity/Kconfig
+index 2ca3b0bc5eba10..931eaea046b328 100644
+--- a/drivers/iio/proximity/Kconfig
++++ b/drivers/iio/proximity/Kconfig
+@@ -72,6 +72,8 @@ config LIDAR_LITE_V2
+ config MB1232
+       tristate "MaxSonar I2CXL family ultrasonic sensors"
+       depends on I2C
++      select IIO_BUFFER
++      select IIO_TRIGGERED_BUFFER
+       help
+         Say Y to build a driver for the ultrasonic sensors I2CXL of
+         MaxBotix which have an i2c interface. It can be used to measure
+diff --git a/drivers/iio/resolver/Kconfig b/drivers/iio/resolver/Kconfig
+index 424529d36080e8..de2dee3832a1a3 100644
+--- a/drivers/iio/resolver/Kconfig
++++ b/drivers/iio/resolver/Kconfig
+@@ -31,6 +31,9 @@ config AD2S1210
+       depends on SPI
+       depends on COMMON_CLK
+       depends on GPIOLIB || COMPILE_TEST
++      select REGMAP
++      select IIO_BUFFER
++      select IIO_TRIGGERED_BUFFER
+       help
+         Say yes here to build support for Analog Devices spi resolver
+         to digital converters, ad2s1210, provides direct access via sysfs.
+diff --git a/drivers/input/joystick/xpad.c b/drivers/input/joystick/xpad.c
+index 4eda18f4f46e39..22ea58bf76cb5c 100644
+--- a/drivers/input/joystick/xpad.c
++++ b/drivers/input/joystick/xpad.c
+@@ -218,6 +218,7 @@ static const struct xpad_device {
+       { 0x0c12, 0x8810, "Zeroplus Xbox Controller", 0, XTYPE_XBOX },
+       { 0x0c12, 0x9902, "HAMA VibraX - *FAULTY HARDWARE*", 0, XTYPE_XBOX },
+       { 0x0d2f, 0x0002, "Andamiro Pump It Up pad", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX },
++      { 0x0db0, 0x1901, "Micro Star International Xbox360 Controller for Windows", 0, XTYPE_XBOX360 },
+       { 0x0e4c, 0x1097, "Radica Gamester Controller", 0, XTYPE_XBOX },
+       { 0x0e4c, 0x1103, "Radica Gamester Reflex", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOX },
+       { 0x0e4c, 0x2390, "Radica Games Jtech Controller", 0, XTYPE_XBOX },
+@@ -373,6 +374,7 @@ static const struct xpad_device {
+       { 0x294b, 0x3404, "Snakebyte GAMEPAD RGB X", 0, XTYPE_XBOXONE },
+       { 0x2dc8, 0x2000, "8BitDo Pro 2 Wired Controller fox Xbox", 0, XTYPE_XBOXONE },
+       { 0x2dc8, 0x3106, "8BitDo Pro 2 Wired Controller", 0, XTYPE_XBOX360 },
++      { 0x2dc8, 0x310a, "8BitDo Ultimate 2C Wireless Controller", 0, XTYPE_XBOX360 },
+       { 0x2e24, 0x0652, "Hyperkin Duke X-Box One pad", 0, XTYPE_XBOXONE },
+       { 0x31e3, 0x1100, "Wooting One", 0, XTYPE_XBOX360 },
+       { 0x31e3, 0x1200, "Wooting Two", 0, XTYPE_XBOX360 },
+@@ -492,6 +494,7 @@ static const struct usb_device_id xpad_table[] = {
+       XPAD_XBOX360_VENDOR(0x07ff),            /* Mad Catz Gamepad */
+       XPAD_XBOXONE_VENDOR(0x0b05),            /* ASUS controllers */
+       XPAD_XBOX360_VENDOR(0x0c12),            /* Zeroplus X-Box 360 controllers */
++      XPAD_XBOX360_VENDOR(0x0db0),            /* Micro Star International X-Box 360 controllers */
+       XPAD_XBOX360_VENDOR(0x0e6f),            /* 0x0e6f Xbox 360 controllers */
+       XPAD_XBOXONE_VENDOR(0x0e6f),            /* 0x0e6f Xbox One controllers */
+       XPAD_XBOX360_VENDOR(0x0f0d),            /* Hori controllers */
+diff --git a/drivers/iommu/intel/iommu.c b/drivers/iommu/intel/iommu.c
+index e3e513cabc86ac..dda6dea7cce099 100644
+--- a/drivers/iommu/intel/iommu.c
++++ b/drivers/iommu/intel/iommu.c
+@@ -3520,8 +3520,10 @@ static int domain_context_clear_one_cb(struct pci_dev *pdev, u16 alias, void *op
+  */
+ static void domain_context_clear(struct device_domain_info *info)
+ {
+-      if (!dev_is_pci(info->dev))
++      if (!dev_is_pci(info->dev)) {
+               domain_context_clear_one(info, info->bus, info->devfn);
++              return;
++      }
+ 
+       pci_for_each_dma_alias(to_pci_dev(info->dev),
+                              &domain_context_clear_one_cb, info);
+diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c
+index fdec478ba5e70a..ab597e74ba08ef 100644
+--- a/drivers/irqchip/irq-gic-v3-its.c
++++ b/drivers/irqchip/irq-gic-v3-its.c
+@@ -797,8 +797,8 @@ static struct its_vpe *its_build_vmapp_cmd(struct its_node *its,
+       its_encode_valid(cmd, desc->its_vmapp_cmd.valid);
+ 
+       if (!desc->its_vmapp_cmd.valid) {
++              alloc = !atomic_dec_return(&desc->its_vmapp_cmd.vpe->vmapp_count);
+               if (is_v4_1(its)) {
+-                      alloc = !atomic_dec_return(&desc->its_vmapp_cmd.vpe->vmapp_count);
+                       its_encode_alloc(cmd, alloc);
+                       /*
+                        * Unmapping a VPE is self-synchronizing on GICv4.1,
+@@ -817,13 +817,13 @@ static struct its_vpe *its_build_vmapp_cmd(struct its_node *its,
+       its_encode_vpt_addr(cmd, vpt_addr);
+       its_encode_vpt_size(cmd, LPI_NRBITS - 1);
+ 
++      alloc = !atomic_fetch_inc(&desc->its_vmapp_cmd.vpe->vmapp_count);
++
+       if (!is_v4_1(its))
+               goto out;
+ 
+       vconf_addr = virt_to_phys(page_address(desc->its_vmapp_cmd.vpe->its_vm->vprop_page));
+ 
+-      alloc = !atomic_fetch_inc(&desc->its_vmapp_cmd.vpe->vmapp_count);
+-
+       its_encode_alloc(cmd, alloc);
+ 
+       /*
+@@ -3806,6 +3806,13 @@ static int its_vpe_set_affinity(struct irq_data *d,
+       struct cpumask *table_mask;
+       unsigned long flags;
+ 
++      /*
++       * Check if we're racing against a VPE being destroyed, for
++       * which we don't want to allow a VMOVP.
++       */
++      if (!atomic_read(&vpe->vmapp_count))
++              return -EINVAL;
++
+       /*
+        * Changing affinity is mega expensive, so let's be as lazy as
+        * we can and only do it if we really have to. Also, if mapped
+@@ -4463,9 +4470,8 @@ static int its_vpe_init(struct its_vpe *vpe)
+       raw_spin_lock_init(&vpe->vpe_lock);
+       vpe->vpe_id = vpe_id;
+       vpe->vpt_page = vpt_page;
+-      if (gic_rdists->has_rvpeid)
+-              atomic_set(&vpe->vmapp_count, 0);
+-      else
++      atomic_set(&vpe->vmapp_count, 0);
++      if (!gic_rdists->has_rvpeid)
+               vpe->vpe_proxy_event = -1;
+ 
+       return 0;
+diff --git a/drivers/irqchip/irq-sifive-plic.c b/drivers/irqchip/irq-sifive-plic.c
+index 4d9ea718086d30..70867bea560f78 100644
+--- a/drivers/irqchip/irq-sifive-plic.c
++++ b/drivers/irqchip/irq-sifive-plic.c
+@@ -123,16 +123,6 @@ static inline void plic_irq_toggle(const struct cpumask *mask,
+       }
+ }
+ 
+-static void plic_irq_enable(struct irq_data *d)
+-{
+-      plic_irq_toggle(irq_data_get_effective_affinity_mask(d), d, 1);
+-}
+-
+-static void plic_irq_disable(struct irq_data *d)
+-{
+-      plic_irq_toggle(irq_data_get_effective_affinity_mask(d), d, 0);
+-}
+-
+ static void plic_irq_unmask(struct irq_data *d)
+ {
+       struct plic_priv *priv = irq_data_get_irq_chip_data(d);
+@@ -147,6 +137,17 @@ static void plic_irq_mask(struct irq_data *d)
+       writel(0, priv->regs + PRIORITY_BASE + d->hwirq * PRIORITY_PER_ID);
+ }
+ 
++static void plic_irq_enable(struct irq_data *d)
++{
++      plic_irq_toggle(irq_data_get_effective_affinity_mask(d), d, 1);
++      plic_irq_unmask(d);
++}
++
++static void plic_irq_disable(struct irq_data *d)
++{
++      plic_irq_toggle(irq_data_get_effective_affinity_mask(d), d, 0);
++}
++
+ static void plic_irq_eoi(struct irq_data *d)
+ {
+       struct plic_handler *handler = this_cpu_ptr(&plic_handlers);
+@@ -577,8 +578,10 @@ static int plic_probe(struct fwnode_handle *fwnode)
+ 
+               handler->enable_save = kcalloc(DIV_ROUND_UP(nr_irqs, 32),
+                                              sizeof(*handler->enable_save), GFP_KERNEL);
+-              if (!handler->enable_save)
++              if (!handler->enable_save) {
++                      error = -ENOMEM;
+                       goto fail_cleanup_contexts;
++              }
+ done:
+               for (hwirq = 1; hwirq <= nr_irqs; hwirq++) {
+                       plic_toggle(handler, hwirq, 0);
+@@ -590,8 +593,10 @@ static int plic_probe(struct fwnode_handle *fwnode)
+ 
+       priv->irqdomain = irq_domain_add_linear(to_of_node(fwnode), nr_irqs + 1,
+                                               &plic_irqdomain_ops, priv);
+-      if (WARN_ON(!priv->irqdomain))
++      if (WARN_ON(!priv->irqdomain)) {
++              error = -ENOMEM;
+               goto fail_cleanup_contexts;
++      }
+ 
+       /*
+        * We can have multiple PLIC instances so setup global state
+diff --git a/drivers/misc/mchp_pci1xxxx/mchp_pci1xxxx_otpe2p.c b/drivers/misc/mchp_pci1xxxx/mchp_pci1xxxx_otpe2p.c
+index 7c3d8bedf90ba2..a2ed477e0370bc 100644
+--- a/drivers/misc/mchp_pci1xxxx/mchp_pci1xxxx_otpe2p.c
++++ b/drivers/misc/mchp_pci1xxxx/mchp_pci1xxxx_otpe2p.c
+@@ -364,6 +364,7 @@ static int pci1xxxx_otp_eeprom_probe(struct auxiliary_device *aux_dev,
+       if (is_eeprom_responsive(priv)) {
+               priv->nvmem_config_eeprom.type = NVMEM_TYPE_EEPROM;
+               priv->nvmem_config_eeprom.name = EEPROM_NAME;
++              priv->nvmem_config_eeprom.id = NVMEM_DEVID_AUTO;
+               priv->nvmem_config_eeprom.dev = &aux_dev->dev;
+               priv->nvmem_config_eeprom.owner = THIS_MODULE;
+               priv->nvmem_config_eeprom.reg_read = pci1xxxx_eeprom_read;
+@@ -383,6 +384,7 @@ static int pci1xxxx_otp_eeprom_probe(struct auxiliary_device *aux_dev,
+ 
+       priv->nvmem_config_otp.type = NVMEM_TYPE_OTP;
+       priv->nvmem_config_otp.name = OTP_NAME;
++      priv->nvmem_config_otp.id = NVMEM_DEVID_AUTO;
+       priv->nvmem_config_otp.dev = &aux_dev->dev;
+       priv->nvmem_config_otp.owner = THIS_MODULE;
+       priv->nvmem_config_otp.reg_read = pci1xxxx_otp_read;
+diff --git a/drivers/net/ethernet/cadence/macb_main.c b/drivers/net/ethernet/cadence/macb_main.c
+index dcd3f54ed0cf00..c1dcd93f6b1c3a 100644
+--- a/drivers/net/ethernet/cadence/macb_main.c
++++ b/drivers/net/ethernet/cadence/macb_main.c
+@@ -930,9 +930,6 @@ static int macb_mdiobus_register(struct macb *bp)
+               return ret;
+       }
+ 
+-      if (of_phy_is_fixed_link(np))
+-              return mdiobus_register(bp->mii_bus);
+-
+       /* Only create the PHY from the device tree if at least one PHY is
+        * described. Otherwise scan the entire MDIO bus. We do this to support
+        * old device tree that did not follow the best practices and did not
+@@ -953,8 +950,19 @@ static int macb_mdiobus_register(struct macb *bp)
+ 
+ static int macb_mii_init(struct macb *bp)
+ {
++      struct device_node *child, *np = bp->pdev->dev.of_node;
+       int err = -ENXIO;
+ 
++      /* With fixed-link, we don't need to register the MDIO bus,
++       * except if we have a child named "mdio" in the device tree.
++       * In that case, some devices may be attached to the MACB's MDIO bus.
++       */
++      child = of_get_child_by_name(np, "mdio");
++      if (child)
++              of_node_put(child);
++      else if (of_phy_is_fixed_link(np))
++              return macb_mii_probe(bp->dev);
++
+       /* Enable management port */
+       macb_writel(bp, NCR, MACB_BIT(MPE));
+ 
+diff --git a/drivers/net/ethernet/freescale/enetc/enetc.c b/drivers/net/ethernet/freescale/enetc/enetc.c
+index f04f42ea60c0f7..0b6b92b04b91d5 100644
+--- a/drivers/net/ethernet/freescale/enetc/enetc.c
++++ b/drivers/net/ethernet/freescale/enetc/enetc.c
+@@ -902,6 +902,7 @@ static bool enetc_clean_tx_ring(struct enetc_bdr *tx_ring, int napi_budget)
+ 
+       if (unlikely(tx_frm_cnt && netif_carrier_ok(ndev) &&
+                    __netif_subqueue_stopped(ndev, tx_ring->index) &&
++                   !test_bit(ENETC_TX_DOWN, &priv->flags) &&
+                    (enetc_bd_unused(tx_ring) >= ENETC_TXBDS_MAX_NEEDED))) {
+               netif_wake_subqueue(ndev, tx_ring->index);
+       }
+@@ -1380,6 +1381,9 @@ int enetc_xdp_xmit(struct net_device *ndev, int num_frames,
+       int xdp_tx_bd_cnt, i, k;
+       int xdp_tx_frm_cnt = 0;
+ 
++      if (unlikely(test_bit(ENETC_TX_DOWN, &priv->flags)))
++              return -ENETDOWN;
++
+       enetc_lock_mdio();
+ 
+       tx_ring = priv->xdp_tx_ring[smp_processor_id()];
+@@ -1524,7 +1528,6 @@ static void enetc_xdp_drop(struct enetc_bdr *rx_ring, int rx_ring_first,
+                                 &rx_ring->rx_swbd[rx_ring_first]);
+               enetc_bdr_idx_inc(rx_ring, &rx_ring_first);
+       }
+-      rx_ring->stats.xdp_drops++;
+ }
+ 
+ static int enetc_clean_rx_ring_xdp(struct enetc_bdr *rx_ring,
+@@ -1589,6 +1592,7 @@ static int enetc_clean_rx_ring_xdp(struct enetc_bdr *rx_ring,
+                       fallthrough;
+               case XDP_DROP:
+                       enetc_xdp_drop(rx_ring, orig_i, i);
++                      rx_ring->stats.xdp_drops++;
+                       break;
+               case XDP_PASS:
+                       rxbd = orig_rxbd;
+@@ -1605,6 +1609,12 @@ static int enetc_clean_rx_ring_xdp(struct enetc_bdr *rx_ring,
+                       break;
+               case XDP_TX:
+                       tx_ring = priv->xdp_tx_ring[rx_ring->index];
++                      if (unlikely(test_bit(ENETC_TX_DOWN, &priv->flags))) {
++                              enetc_xdp_drop(rx_ring, orig_i, i);
++                              tx_ring->stats.xdp_tx_drops++;
++                              break;
++                      }
++
+                       xdp_tx_bd_cnt = enetc_rx_swbd_to_xdp_tx_swbd(xdp_tx_arr,
+                                                                    rx_ring,
+                                                                    orig_i, i);
+@@ -2226,18 +2236,24 @@ static void enetc_enable_rxbdr(struct enetc_hw *hw, struct enetc_bdr *rx_ring)
+       enetc_rxbdr_wr(hw, idx, ENETC_RBMR, rbmr);
+ }
+ 
+-static void enetc_enable_bdrs(struct enetc_ndev_priv *priv)
++static void enetc_enable_rx_bdrs(struct enetc_ndev_priv *priv)
+ {
+       struct enetc_hw *hw = &priv->si->hw;
+       int i;
+ 
+-      for (i = 0; i < priv->num_tx_rings; i++)
+-              enetc_enable_txbdr(hw, priv->tx_ring[i]);
+-
+       for (i = 0; i < priv->num_rx_rings; i++)
+               enetc_enable_rxbdr(hw, priv->rx_ring[i]);
+ }
+ 
++static void enetc_enable_tx_bdrs(struct enetc_ndev_priv *priv)
++{
++      struct enetc_hw *hw = &priv->si->hw;
++      int i;
++
++      for (i = 0; i < priv->num_tx_rings; i++)
++              enetc_enable_txbdr(hw, priv->tx_ring[i]);
++}
++
+ static void enetc_disable_rxbdr(struct enetc_hw *hw, struct enetc_bdr *rx_ring)
+ {
+       int idx = rx_ring->index;
+@@ -2254,18 +2270,24 @@ static void enetc_disable_txbdr(struct enetc_hw *hw, struct enetc_bdr *rx_ring)
+       enetc_txbdr_wr(hw, idx, ENETC_TBMR, 0);
+ }
+ 
+-static void enetc_disable_bdrs(struct enetc_ndev_priv *priv)
++static void enetc_disable_rx_bdrs(struct enetc_ndev_priv *priv)
+ {
+       struct enetc_hw *hw = &priv->si->hw;
+       int i;
+ 
+-      for (i = 0; i < priv->num_tx_rings; i++)
+-              enetc_disable_txbdr(hw, priv->tx_ring[i]);
+-
+       for (i = 0; i < priv->num_rx_rings; i++)
+               enetc_disable_rxbdr(hw, priv->rx_ring[i]);
+ }
+ 
++static void enetc_disable_tx_bdrs(struct enetc_ndev_priv *priv)
++{
++      struct enetc_hw *hw = &priv->si->hw;
++      int i;
++
++      for (i = 0; i < priv->num_tx_rings; i++)
++              enetc_disable_txbdr(hw, priv->tx_ring[i]);
++}
++
+ static void enetc_wait_txbdr(struct enetc_hw *hw, struct enetc_bdr *tx_ring)
+ {
+       int delay = 8, timeout = 100;
+@@ -2463,9 +2485,13 @@ void enetc_start(struct net_device *ndev)
+               enable_irq(irq);
+       }
+ 
+-      enetc_enable_bdrs(priv);
++      enetc_enable_tx_bdrs(priv);
++
++      enetc_enable_rx_bdrs(priv);
+ 
+       netif_tx_start_all_queues(ndev);
++
++      clear_bit(ENETC_TX_DOWN, &priv->flags);
+ }
+ EXPORT_SYMBOL_GPL(enetc_start);
+ 
+@@ -2523,9 +2549,15 @@ void enetc_stop(struct net_device *ndev)
+       struct enetc_ndev_priv *priv = netdev_priv(ndev);
+       int i;
+ 
++      set_bit(ENETC_TX_DOWN, &priv->flags);
++
+       netif_tx_stop_all_queues(ndev);
+ 
+-      enetc_disable_bdrs(priv);
++      enetc_disable_rx_bdrs(priv);
++
++      enetc_wait_bdrs(priv);
++
++      enetc_disable_tx_bdrs(priv);
+ 
+       for (i = 0; i < priv->bdr_int_num; i++) {
+               int irq = pci_irq_vector(priv->si->pdev,
+@@ -2536,8 +2568,6 @@ void enetc_stop(struct net_device *ndev)
+               napi_disable(&priv->int_vector[i]->napi);
+       }
+ 
+-      enetc_wait_bdrs(priv);
+-
+       enetc_clear_interrupts(priv);
+ }
+ EXPORT_SYMBOL_GPL(enetc_stop);
+diff --git a/drivers/net/ethernet/freescale/enetc/enetc.h b/drivers/net/ethernet/freescale/enetc/enetc.h
+index a9c2ff22431c57..4f3b314aeead98 100644
+--- a/drivers/net/ethernet/freescale/enetc/enetc.h
++++ b/drivers/net/ethernet/freescale/enetc/enetc.h
+@@ -328,6 +328,7 @@ enum enetc_active_offloads {
+ 
+ enum enetc_flags_bit {
+       ENETC_TX_ONESTEP_TSTAMP_IN_PROGRESS = 0,
++      ENETC_TX_DOWN,
+ };
+ 
+ /* interrupt coalescing modes */
+diff --git a/drivers/net/ethernet/freescale/fec_ptp.c b/drivers/net/ethernet/freescale/fec_ptp.c
+index 5e8fac50f945d4..a4eb6edb850add 100644
+--- a/drivers/net/ethernet/freescale/fec_ptp.c
++++ b/drivers/net/ethernet/freescale/fec_ptp.c
+@@ -90,6 +90,30 @@
+ #define FEC_PTP_MAX_NSEC_PERIOD               4000000000ULL
+ #define FEC_PTP_MAX_NSEC_COUNTER      0x80000000ULL
+ 
++/**
++ * fec_ptp_read - read raw cycle counter (to be used by time counter)
++ * @cc: the cyclecounter structure
++ *
++ * this function reads the cyclecounter registers and is called by the
++ * cyclecounter structure used to construct a ns counter from the
++ * arbitrary fixed point registers
++ */
++static u64 fec_ptp_read(const struct cyclecounter *cc)
++{
++      struct fec_enet_private *fep =
++              container_of(cc, struct fec_enet_private, cc);
++      u32 tempval;
++
++      tempval = readl(fep->hwp + FEC_ATIME_CTRL);
++      tempval |= FEC_T_CTRL_CAPTURE;
++      writel(tempval, fep->hwp + FEC_ATIME_CTRL);
++
++      if (fep->quirks & FEC_QUIRK_BUG_CAPTURE)
++              udelay(1);
++
++      return readl(fep->hwp + FEC_ATIME);
++}
++
+ /**
+  * fec_ptp_enable_pps
+  * @fep: the fec_enet_private structure handle
+@@ -136,7 +160,7 @@ static int fec_ptp_enable_pps(struct fec_enet_private *fep, uint enable)
+                * NSEC_PER_SEC - ts.tv_nsec. Add the remaining nanoseconds
+                * to current timer would be next second.
+                */
+-              tempval = fep->cc.read(&fep->cc);
++              tempval = fec_ptp_read(&fep->cc);
+               /* Convert the ptp local counter to 1588 timestamp */
+               ns = timecounter_cyc2time(&fep->tc, tempval);
+               ts = ns_to_timespec64(ns);
+@@ -211,13 +235,7 @@ static int fec_ptp_pps_perout(struct fec_enet_private *fep)
+       timecounter_read(&fep->tc);
+ 
+       /* Get the current ptp hardware time counter */
+-      temp_val = readl(fep->hwp + FEC_ATIME_CTRL);
+-      temp_val |= FEC_T_CTRL_CAPTURE;
+-      writel(temp_val, fep->hwp + FEC_ATIME_CTRL);
+-      if (fep->quirks & FEC_QUIRK_BUG_CAPTURE)
+-              udelay(1);
+-
+-      ptp_hc = readl(fep->hwp + FEC_ATIME);
++      ptp_hc = fec_ptp_read(&fep->cc);
+ 
+       /* Convert the ptp local counter to 1588 timestamp */
+       curr_time = timecounter_cyc2time(&fep->tc, ptp_hc);
+@@ -271,30 +289,6 @@ static enum hrtimer_restart fec_ptp_pps_perout_handler(struct hrtimer *timer)
+       return HRTIMER_NORESTART;
+ }
+ 
+-/**
+- * fec_ptp_read - read raw cycle counter (to be used by time counter)
+- * @cc: the cyclecounter structure
+- *
+- * this function reads the cyclecounter registers and is called by the
+- * cyclecounter structure used to construct a ns counter from the
+- * arbitrary fixed point registers
+- */
+-static u64 fec_ptp_read(const struct cyclecounter *cc)
+-{
+-      struct fec_enet_private *fep =
+-              container_of(cc, struct fec_enet_private, cc);
+-      u32 tempval;
+-
+-      tempval = readl(fep->hwp + FEC_ATIME_CTRL);
+-      tempval |= FEC_T_CTRL_CAPTURE;
+-      writel(tempval, fep->hwp + FEC_ATIME_CTRL);
+-
+-      if (fep->quirks & FEC_QUIRK_BUG_CAPTURE)
+-              udelay(1);
+-
+-      return readl(fep->hwp + FEC_ATIME);
+-}
+-
+ /**
+  * fec_ptp_start_cyclecounter - create the cycle counter from hw
+  * @ndev: network device
+diff --git a/drivers/net/ethernet/microchip/vcap/vcap_api_kunit.c b/drivers/net/ethernet/microchip/vcap/vcap_api_kunit.c
+index f2a5a36fdacd43..7251121ab196e3 100644
+--- a/drivers/net/ethernet/microchip/vcap/vcap_api_kunit.c
++++ b/drivers/net/ethernet/microchip/vcap/vcap_api_kunit.c
+@@ -1444,6 +1444,8 @@ static void vcap_api_encode_rule_test(struct kunit *test)
+ 
+       ret = vcap_del_rule(&test_vctrl, &test_netdev, id);
+       KUNIT_EXPECT_EQ(test, 0, ret);
++
++      vcap_free_rule(rule);
+ }
+ 
+ static void vcap_api_set_rule_counter_test(struct kunit *test)
+diff --git a/drivers/parport/procfs.c b/drivers/parport/procfs.c
+index 3ef486cd3d6d57..3880460e67f25a 100644
+--- a/drivers/parport/procfs.c
++++ b/drivers/parport/procfs.c
+@@ -51,12 +51,12 @@ static int do_active_device(const struct ctl_table *table, int write,
+       
+       for (dev = port->devices; dev ; dev = dev->next) {
+               if(dev == port->cad) {
+-                      len += snprintf(buffer, sizeof(buffer), "%s\n", dev->name);
++                      len += scnprintf(buffer, sizeof(buffer), "%s\n", dev->name);
+               }
+       }
+ 
+       if(!len) {
+-              len += snprintf(buffer, sizeof(buffer), "%s\n", "none");
++              len += scnprintf(buffer, sizeof(buffer), "%s\n", "none");
+       }
+ 
+       if (len > *lenp)
+@@ -87,19 +87,19 @@ static int do_autoprobe(const struct ctl_table *table, int write,
+       }
+       
+       if ((str = info->class_name) != NULL)
+-              len += snprintf (buffer + len, sizeof(buffer) - len, "CLASS:%s;\n", str);
++              len += scnprintf (buffer + len, sizeof(buffer) - len, "CLASS:%s;\n", str);
+ 
+       if ((str = info->model) != NULL)
+-              len += snprintf (buffer + len, sizeof(buffer) - len, "MODEL:%s;\n", str);
++              len += scnprintf (buffer + len, sizeof(buffer) - len, "MODEL:%s;\n", str);
+ 
+       if ((str = info->mfr) != NULL)
+-              len += snprintf (buffer + len, sizeof(buffer) - len, "MANUFACTURER:%s;\n", str);
++              len += scnprintf (buffer + len, sizeof(buffer) - len, "MANUFACTURER:%s;\n", str);
+ 
+       if ((str = info->description) != NULL)
+-              len += snprintf (buffer + len, sizeof(buffer) - len, "DESCRIPTION:%s;\n", str);
++              len += scnprintf (buffer + len, sizeof(buffer) - len, "DESCRIPTION:%s;\n", str);
+ 
+       if ((str = info->cmdset) != NULL)
+-              len += snprintf (buffer + len, sizeof(buffer) - len, "COMMAND SET:%s;\n", str);
++              len += scnprintf (buffer + len, sizeof(buffer) - len, "COMMAND SET:%s;\n", str);
+ 
+       if (len > *lenp)
+               len = *lenp;
+@@ -128,7 +128,7 @@ static int do_hardware_base_addr(const struct ctl_table *table, int write,
+       if (write) /* permissions prevent this anyway */
+               return -EACCES;
+ 
+-      len += snprintf (buffer, sizeof(buffer), "%lu\t%lu\n", port->base, port->base_hi);
++      len += scnprintf (buffer, sizeof(buffer), "%lu\t%lu\n", port->base, port->base_hi);
+ 
+       if (len > *lenp)
+               len = *lenp;
+@@ -155,7 +155,7 @@ static int do_hardware_irq(const struct ctl_table *table, int write,
+       if (write) /* permissions prevent this anyway */
+               return -EACCES;
+ 
+-      len += snprintf (buffer, sizeof(buffer), "%d\n", port->irq);
++      len += scnprintf (buffer, sizeof(buffer), "%d\n", port->irq);
+ 
+       if (len > *lenp)
+               len = *lenp;
+@@ -182,7 +182,7 @@ static int do_hardware_dma(const struct ctl_table *table, int write,
+       if (write) /* permissions prevent this anyway */
+               return -EACCES;
+ 
+-      len += snprintf (buffer, sizeof(buffer), "%d\n", port->dma);
++      len += scnprintf (buffer, sizeof(buffer), "%d\n", port->dma);
+ 
+       if (len > *lenp)
+               len = *lenp;
+@@ -213,7 +213,7 @@ static int do_hardware_modes(const struct ctl_table *table, int write,
+ #define printmode(x)                                                  \
+ do {                                                                  \
+       if (port->modes & PARPORT_MODE_##x)                             \
+-              len += snprintf(buffer + len, sizeof(buffer) - len, "%s%s", f++ ? "," : "", #x); \
++              len += scnprintf(buffer + len, sizeof(buffer) - len, "%s%s", f++ ? "," : "", #x); \
+ } while (0)
+               int f = 0;
+               printmode(PCSPP);
+diff --git a/drivers/pinctrl/intel/pinctrl-intel-platform.c b/drivers/pinctrl/intel/pinctrl-intel-platform.c
+index 4a19ab3b4ba743..2d5ba8278fb9bc 100644
+--- a/drivers/pinctrl/intel/pinctrl-intel-platform.c
++++ b/drivers/pinctrl/intel/pinctrl-intel-platform.c
+@@ -90,7 +90,6 @@ static int intel_platform_pinctrl_prepare_community(struct device *dev,
+                                                   struct intel_community *community,
+                                                   struct intel_platform_pins *pins)
+ {
+-      struct fwnode_handle *child;
+       struct intel_padgroup *gpps;
+       unsigned int group;
+       size_t ngpps;
+@@ -131,7 +130,7 @@ static int intel_platform_pinctrl_prepare_community(struct device *dev,
+               return -ENOMEM;
+ 
+       group = 0;
+-      device_for_each_child_node(dev, child) {
++      device_for_each_child_node_scoped(dev, child) {
+               struct intel_padgroup *gpp = &gpps[group];
+ 
+               gpp->reg_num = group;
+diff --git a/drivers/pinctrl/nuvoton/pinctrl-ma35.c b/drivers/pinctrl/nuvoton/pinctrl-ma35.c
+index 1fa00a23534a9d..59c4e7c6cddea1 100644
+--- a/drivers/pinctrl/nuvoton/pinctrl-ma35.c
++++ b/drivers/pinctrl/nuvoton/pinctrl-ma35.c
+@@ -218,7 +218,7 @@ static int ma35_pinctrl_dt_node_to_map_func(struct pinctrl_dev *pctldev,
+       }
+ 
+       map_num += grp->npins;
+-      new_map = devm_kcalloc(pctldev->dev, map_num, sizeof(*new_map), GFP_KERNEL);
++      new_map = kcalloc(map_num, sizeof(*new_map), GFP_KERNEL);
+       if (!new_map)
+               return -ENOMEM;
+ 
+diff --git a/drivers/pinctrl/pinctrl-apple-gpio.c b/drivers/pinctrl/pinctrl-apple-gpio.c
+index 3751c7de37aa9f..f861e63f411521 100644
+--- a/drivers/pinctrl/pinctrl-apple-gpio.c
++++ b/drivers/pinctrl/pinctrl-apple-gpio.c
+@@ -474,6 +474,9 @@ static int apple_gpio_pinctrl_probe(struct platform_device *pdev)
+       for (i = 0; i < npins; i++) {
+               pins[i].number = i;
+               pins[i].name = devm_kasprintf(&pdev->dev, GFP_KERNEL, "PIN%u", i);
++              if (!pins[i].name)
++                      return -ENOMEM;
++
+               pins[i].drv_data = pctl;
+               pin_names[i] = pins[i].name;
+               pin_nums[i] = i;
+diff --git a/drivers/pinctrl/pinctrl-ocelot.c b/drivers/pinctrl/pinctrl-ocelot.c
+index be9b8c01016708..d1ab8450ea93eb 100644
+--- a/drivers/pinctrl/pinctrl-ocelot.c
++++ b/drivers/pinctrl/pinctrl-ocelot.c
+@@ -1955,21 +1955,21 @@ static void ocelot_irq_handler(struct irq_desc *desc)
+       unsigned int reg = 0, irq, i;
+       unsigned long irqs;
+ 
++      chained_irq_enter(parent_chip, desc);
++
+       for (i = 0; i < info->stride; i++) {
+               regmap_read(info->map, id_reg + 4 * i, &reg);
+               if (!reg)
+                       continue;
+ 
+-              chained_irq_enter(parent_chip, desc);
+-
+               irqs = reg;
+ 
+               for_each_set_bit(irq, &irqs,
+                                min(32U, info->desc->npins - 32 * i))
+                       generic_handle_domain_irq(chip->irq.domain, irq + 32 * i);
+-
+-              chained_irq_exit(parent_chip, desc);
+       }
++
++      chained_irq_exit(parent_chip, desc);
+ }
+ 
+ static int ocelot_gpiochip_register(struct platform_device *pdev,
+diff --git a/drivers/pinctrl/stm32/pinctrl-stm32.c b/drivers/pinctrl/stm32/pinctrl-stm32.c
+index a8673739871d81..5b7fa77c118436 100644
+--- a/drivers/pinctrl/stm32/pinctrl-stm32.c
++++ b/drivers/pinctrl/stm32/pinctrl-stm32.c
+@@ -1374,10 +1374,15 @@ static int stm32_gpiolib_register_bank(struct stm32_pinctrl *pctl, struct fwnode
+ 
+       for (i = 0; i < npins; i++) {
+               stm32_pin = stm32_pctrl_get_desc_pin_from_gpio(pctl, bank, i);
+-              if (stm32_pin && stm32_pin->pin.name)
++              if (stm32_pin && stm32_pin->pin.name) {
+                       names[i] = devm_kasprintf(dev, GFP_KERNEL, "%s", stm32_pin->pin.name);
+-              else
++                      if (!names[i]) {
++                              err = -ENOMEM;
++                              goto err_clk;
++                      }
++              } else {
+                       names[i] = NULL;
++              }
+       }
+ 
+       bank->gpio_chip.names = (const char * const *)names;
+diff --git a/drivers/s390/char/sclp.c b/drivers/s390/char/sclp.c
+index f3621adbd5debc..fbffd451031fdb 100644
+--- a/drivers/s390/char/sclp.c
++++ b/drivers/s390/char/sclp.c
+@@ -1195,7 +1195,8 @@ sclp_reboot_event(struct notifier_block *this, unsigned long event, void *ptr)
+ }
+ 
+ static struct notifier_block sclp_reboot_notifier = {
+-      .notifier_call = sclp_reboot_event
++      .notifier_call = sclp_reboot_event,
++      .priority      = INT_MIN,
+ };
+ 
+ static ssize_t con_pages_show(struct device_driver *dev, char *buf)
+diff --git a/drivers/s390/char/sclp_vt220.c b/drivers/s390/char/sclp_vt220.c
+index 218ae604f737ff..33b9c968dbcba6 100644
+--- a/drivers/s390/char/sclp_vt220.c
++++ b/drivers/s390/char/sclp_vt220.c
+@@ -319,7 +319,7 @@ sclp_vt220_add_msg(struct sclp_vt220_request *request,
+       buffer = (void *) ((addr_t) sccb + sccb->header.length);
+ 
+       if (convertlf) {
+-              /* Perform Linefeed conversion (0x0a -> 0x0a 0x0d)*/
++              /* Perform Linefeed conversion (0x0a -> 0x0d 0x0a)*/
+               for (from=0, to=0;
+                    (from < count) && (to < sclp_vt220_space_left(request));
+                    from++) {
+@@ -328,8 +328,8 @@ sclp_vt220_add_msg(struct sclp_vt220_request *request,
+                       /* Perform conversion */
+                       if (c == 0x0a) {
+                               if (to + 1 < sclp_vt220_space_left(request)) {
+-                                      ((unsigned char *) buffer)[to++] = c;
+                                       ((unsigned char *) buffer)[to++] = 0x0d;
++                                      ((unsigned char *) buffer)[to++] = c;
+                               } else
+                                       break;
+ 
+diff --git a/drivers/scsi/mpi3mr/mpi3mr.h b/drivers/scsi/mpi3mr/mpi3mr.h
+index dc2cdd5f031114..3822efe349e13f 100644
+--- a/drivers/scsi/mpi3mr/mpi3mr.h
++++ b/drivers/scsi/mpi3mr/mpi3mr.h
+@@ -541,8 +541,8 @@ struct mpi3mr_hba_port {
+  * @port_list: List of ports belonging to a SAS node
+  * @num_phys: Number of phys associated with port
+  * @marked_responding: used while refresing the sas ports
+- * @lowest_phy: lowest phy ID of current sas port
+- * @phy_mask: phy_mask of current sas port
++ * @lowest_phy: lowest phy ID of current sas port, valid for controller port
++ * @phy_mask: phy_mask of current sas port, valid for controller port
+  * @hba_port: HBA port entry
+  * @remote_identify: Attached device identification
+  * @rphy: SAS transport layer rphy object
+diff --git a/drivers/scsi/mpi3mr/mpi3mr_transport.c b/drivers/scsi/mpi3mr/mpi3mr_transport.c
+index ccd23def2e0cfa..0ba9e6a6a13c6d 100644
+--- a/drivers/scsi/mpi3mr/mpi3mr_transport.c
++++ b/drivers/scsi/mpi3mr/mpi3mr_transport.c
+@@ -590,12 +590,13 @@ static enum sas_linkrate mpi3mr_convert_phy_link_rate(u8 link_rate)
+  * @mrioc: Adapter instance reference
+  * @mr_sas_port: Internal Port object
+  * @mr_sas_phy: Internal Phy object
++ * @host_node: Flag to indicate this is a host_node
+  *
+  * Return: None.
+  */
+ static void mpi3mr_delete_sas_phy(struct mpi3mr_ioc *mrioc,
+       struct mpi3mr_sas_port *mr_sas_port,
+-      struct mpi3mr_sas_phy *mr_sas_phy)
++      struct mpi3mr_sas_phy *mr_sas_phy, u8 host_node)
+ {
+       u64 sas_address = mr_sas_port->remote_identify.sas_address;
+ 
+@@ -605,9 +606,13 @@ static void mpi3mr_delete_sas_phy(struct mpi3mr_ioc *mrioc,
+ 
+       list_del(&mr_sas_phy->port_siblings);
+       mr_sas_port->num_phys--;
+-      mr_sas_port->phy_mask &= ~(1 << mr_sas_phy->phy_id);
+-      if (mr_sas_port->lowest_phy == mr_sas_phy->phy_id)
+-              mr_sas_port->lowest_phy = ffs(mr_sas_port->phy_mask) - 1;
++
++      if (host_node) {
++              mr_sas_port->phy_mask &= ~(1 << mr_sas_phy->phy_id);
++
++              if (mr_sas_port->lowest_phy == mr_sas_phy->phy_id)
++                      mr_sas_port->lowest_phy = ffs(mr_sas_port->phy_mask) - 1;
++      }
+       sas_port_delete_phy(mr_sas_port->port, mr_sas_phy->phy);
+       mr_sas_phy->phy_belongs_to_port = 0;
+ }
+@@ -617,12 +622,13 @@ static void mpi3mr_delete_sas_phy(struct mpi3mr_ioc *mrioc,
+  * @mrioc: Adapter instance reference
+  * @mr_sas_port: Internal Port object
+  * @mr_sas_phy: Internal Phy object
++ * @host_node: Flag to indicate this is a host_node
+  *
+  * Return: None.
+  */
+ static void mpi3mr_add_sas_phy(struct mpi3mr_ioc *mrioc,
+       struct mpi3mr_sas_port *mr_sas_port,
+-      struct mpi3mr_sas_phy *mr_sas_phy)
++      struct mpi3mr_sas_phy *mr_sas_phy, u8 host_node)
+ {
+       u64 sas_address = mr_sas_port->remote_identify.sas_address;
+ 
+@@ -632,9 +638,12 @@ static void mpi3mr_add_sas_phy(struct mpi3mr_ioc *mrioc,
+ 
+       list_add_tail(&mr_sas_phy->port_siblings, &mr_sas_port->phy_list);
+       mr_sas_port->num_phys++;
+-      mr_sas_port->phy_mask |= (1 << mr_sas_phy->phy_id);
+-      if (mr_sas_phy->phy_id < mr_sas_port->lowest_phy)
+-              mr_sas_port->lowest_phy = ffs(mr_sas_port->phy_mask) - 1;
++      if (host_node) {
++              mr_sas_port->phy_mask |= (1 << mr_sas_phy->phy_id);
++
++              if (mr_sas_phy->phy_id < mr_sas_port->lowest_phy)
++                      mr_sas_port->lowest_phy = ffs(mr_sas_port->phy_mask) - 1;
++      }
+       sas_port_add_phy(mr_sas_port->port, mr_sas_phy->phy);
+       mr_sas_phy->phy_belongs_to_port = 1;
+ }
+@@ -675,7 +684,7 @@ static void mpi3mr_add_phy_to_an_existing_port(struct mpi3mr_ioc *mrioc,
+                       if (srch_phy == mr_sas_phy)
+                               return;
+               }
+-              mpi3mr_add_sas_phy(mrioc, mr_sas_port, mr_sas_phy);
++              mpi3mr_add_sas_phy(mrioc, mr_sas_port, mr_sas_phy, mr_sas_node->host_node);
+               return;
+       }
+ }
+@@ -736,7 +745,7 @@ static void mpi3mr_del_phy_from_an_existing_port(struct mpi3mr_ioc *mrioc,
+                               mpi3mr_delete_sas_port(mrioc, mr_sas_port);
+                       else
+                               mpi3mr_delete_sas_phy(mrioc, mr_sas_port,
+-                                  mr_sas_phy);
++                                  mr_sas_phy, mr_sas_node->host_node);
+                       return;
+               }
+       }
+@@ -1028,7 +1037,7 @@ mpi3mr_alloc_hba_port(struct mpi3mr_ioc *mrioc, u16 port_id)
+ /**
+  * mpi3mr_get_hba_port_by_id - find hba port by id
+  * @mrioc: Adapter instance reference
+- * @port_id - Port ID to search
++ * @port_id: Port ID to search
+  *
+  * Return: mpi3mr_hba_port reference for the matched port
+  */
+@@ -1367,7 +1376,8 @@ static struct mpi3mr_sas_port *mpi3mr_sas_port_add(struct mpi3mr_ioc *mrioc,
+       mpi3mr_sas_port_sanity_check(mrioc, mr_sas_node,
+           mr_sas_port->remote_identify.sas_address, hba_port);
+ 
+-      if (mr_sas_node->num_phys >= sizeof(mr_sas_port->phy_mask) * 8)
++      if (mr_sas_node->host_node && mr_sas_node->num_phys >=
++                      sizeof(mr_sas_port->phy_mask) * 8)
+               ioc_info(mrioc, "max port count %u could be too high\n",
+                   mr_sas_node->num_phys);
+ 
+@@ -1377,7 +1387,7 @@ static struct mpi3mr_sas_port *mpi3mr_sas_port_add(struct mpi3mr_ioc *mrioc,
+                   (mr_sas_node->phy[i].hba_port != hba_port))
+                       continue;
+ 
+-              if (i >= sizeof(mr_sas_port->phy_mask) * 8) {
++              if (mr_sas_node->host_node && (i >= sizeof(mr_sas_port->phy_mask) * 8)) {
+                       ioc_warn(mrioc, "skipping port %u, max allowed value is %zu\n",
+                           i, sizeof(mr_sas_port->phy_mask) * 8);
+                       goto out_fail;
+@@ -1385,7 +1395,8 @@ static struct mpi3mr_sas_port *mpi3mr_sas_port_add(struct mpi3mr_ioc *mrioc,
+               list_add_tail(&mr_sas_node->phy[i].port_siblings,
+                   &mr_sas_port->phy_list);
+               mr_sas_port->num_phys++;
+-              mr_sas_port->phy_mask |= (1 << i);
++              if (mr_sas_node->host_node)
++                      mr_sas_port->phy_mask |= (1 << i);
+       }
+ 
+       if (!mr_sas_port->num_phys) {
+@@ -1394,7 +1405,8 @@ static struct mpi3mr_sas_port *mpi3mr_sas_port_add(struct mpi3mr_ioc *mrioc,
+               goto out_fail;
+       }
+ 
+-      mr_sas_port->lowest_phy = ffs(mr_sas_port->phy_mask) - 1;
++      if (mr_sas_node->host_node)
++              mr_sas_port->lowest_phy = ffs(mr_sas_port->phy_mask) - 1;
+ 
+       if (mr_sas_port->remote_identify.device_type == SAS_END_DEVICE) {
+               tgtdev = mpi3mr_get_tgtdev_by_addr(mrioc,
+diff --git a/drivers/tty/n_gsm.c b/drivers/tty/n_gsm.c
+index 5d37a09849163f..252849910588f6 100644
+--- a/drivers/tty/n_gsm.c
++++ b/drivers/tty/n_gsm.c
+@@ -3157,6 +3157,8 @@ static void gsm_cleanup_mux(struct gsm_mux *gsm, bool disc)
+       mutex_unlock(&gsm->mutex);
+       /* Now wipe the queues */
+       tty_ldisc_flush(gsm->tty);
++
++      guard(spinlock_irqsave)(&gsm->tx_lock);
+       list_for_each_entry_safe(txq, ntxq, &gsm->tx_ctrl_list, list)
+               kfree(txq);
+       INIT_LIST_HEAD(&gsm->tx_ctrl_list);
+diff --git a/drivers/tty/serial/imx.c b/drivers/tty/serial/imx.c
+index 67d4a72eda770b..90974d338f3c0b 100644
+--- a/drivers/tty/serial/imx.c
++++ b/drivers/tty/serial/imx.c
+@@ -762,6 +762,21 @@ static irqreturn_t __imx_uart_rtsint(int irq, void *dev_id)
+ 
+       imx_uart_writel(sport, USR1_RTSD, USR1);
+       usr1 = imx_uart_readl(sport, USR1) & USR1_RTSS;
++      /*
++       * Update sport->old_status here, so any follow-up calls to
++       * imx_uart_mctrl_check() will be able to recognize that RTS
++       * state changed since last imx_uart_mctrl_check() call.
++       *
++       * In case RTS has been detected as asserted here and later on
++       * deasserted by the time imx_uart_mctrl_check() was called,
++       * imx_uart_mctrl_check() can detect the RTS state change and
++       * trigger uart_handle_cts_change() to unblock the port for
++       * further TX transfers.
++       */
++      if (usr1 & USR1_RTSS)
++              sport->old_status |= TIOCM_CTS;
++      else
++              sport->old_status &= ~TIOCM_CTS;
+       uart_handle_cts_change(&sport->port, usr1);
+       wake_up_interruptible(&sport->port.state->port.delta_msr_wait);
+ 
+diff --git a/drivers/tty/serial/qcom_geni_serial.c b/drivers/tty/serial/qcom_geni_serial.c
+index f8f6e9466b400d..3acba0887fca40 100644
+--- a/drivers/tty/serial/qcom_geni_serial.c
++++ b/drivers/tty/serial/qcom_geni_serial.c
+@@ -146,6 +146,7 @@ static struct uart_driver qcom_geni_console_driver;
+ static struct uart_driver qcom_geni_uart_driver;
+ 
+ static void qcom_geni_serial_cancel_tx_cmd(struct uart_port *uport);
++static int qcom_geni_serial_port_setup(struct uart_port *uport);
+ 
+ static inline struct qcom_geni_serial_port *to_dev_port(struct uart_port *uport)
+ {
+@@ -393,6 +394,23 @@ static void qcom_geni_serial_poll_put_char(struct uart_port *uport,
+       writel(M_TX_FIFO_WATERMARK_EN, uport->membase + SE_GENI_M_IRQ_CLEAR);
+       qcom_geni_serial_poll_tx_done(uport);
+ }
++
++static int qcom_geni_serial_poll_init(struct uart_port *uport)
++{
++      struct qcom_geni_serial_port *port = to_dev_port(uport);
++      int ret;
++
++      if (!port->setup) {
++              ret = qcom_geni_serial_port_setup(uport);
++              if (ret)
++                      return ret;
++      }
++
++      if (!qcom_geni_serial_secondary_active(uport))
++              geni_se_setup_s_cmd(&port->se, UART_START_READ, 0);
++
++      return 0;
++}
+ #endif
+ 
+ #ifdef CONFIG_SERIAL_QCOM_GENI_CONSOLE
+@@ -769,17 +787,27 @@ static void qcom_geni_serial_start_rx_fifo(struct uart_port *uport)
+ static void qcom_geni_serial_stop_rx_dma(struct uart_port *uport)
+ {
+       struct qcom_geni_serial_port *port = to_dev_port(uport);
++      bool done;
+ 
+       if (!qcom_geni_serial_secondary_active(uport))
+               return;
+ 
+       geni_se_cancel_s_cmd(&port->se);
+-      qcom_geni_serial_poll_bit(uport, SE_GENI_S_IRQ_STATUS,
+-                                S_CMD_CANCEL_EN, true);
+-
+-      if (qcom_geni_serial_secondary_active(uport))
++      done = qcom_geni_serial_poll_bit(uport, SE_DMA_RX_IRQ_STAT,
++                      RX_EOT, true);
++      if (done) {
++              writel(RX_EOT | RX_DMA_DONE,
++                              uport->membase + SE_DMA_RX_IRQ_CLR);
++      } else {
+               qcom_geni_serial_abort_rx(uport);
+ 
++              writel(1, uport->membase + SE_DMA_RX_FSM_RST);
++              qcom_geni_serial_poll_bit(uport, SE_DMA_RX_IRQ_STAT,
++                              RX_RESET_DONE, true);
++              writel(RX_RESET_DONE | RX_DMA_DONE,
++                              uport->membase + SE_DMA_RX_IRQ_CLR);
++      }
++
+       if (port->rx_dma_addr) {
+               geni_se_rx_dma_unprep(&port->se, port->rx_dma_addr,
+                                     DMA_RX_BUF_SIZE);
+@@ -1078,10 +1106,12 @@ static void qcom_geni_serial_shutdown(struct uart_port *uport)
+ {
+       disable_irq(uport->irq);
+ 
++      uart_port_lock_irq(uport);
+       qcom_geni_serial_stop_tx(uport);
+       qcom_geni_serial_stop_rx(uport);
+ 
+       qcom_geni_serial_cancel_tx_cmd(uport);
++      uart_port_unlock_irq(uport);
+ }
+ 
+ static void qcom_geni_serial_flush_buffer(struct uart_port *uport)
+@@ -1134,7 +1164,6 @@ static int qcom_geni_serial_port_setup(struct uart_port *uport)
+                              false, true, true);
+       geni_se_init(&port->se, UART_RX_WM, port->rx_fifo_depth - 2);
+       geni_se_select_mode(&port->se, port->dev_data->mode);
+-      qcom_geni_serial_start_rx(uport);
+       port->setup = true;
+ 
+       return 0;
+@@ -1150,6 +1179,11 @@ static int qcom_geni_serial_startup(struct uart_port *uport)
+               if (ret)
+                       return ret;
+       }
++
++      uart_port_lock_irq(uport);
++      qcom_geni_serial_start_rx(uport);
++      uart_port_unlock_irq(uport);
++
+       enable_irq(uport->irq);
+ 
+       return 0;
+@@ -1235,7 +1269,6 @@ static void qcom_geni_serial_set_termios(struct uart_port *uport,
+       unsigned int avg_bw_core;
+       unsigned long timeout;
+ 
+-      qcom_geni_serial_stop_rx(uport);
+       /* baud rate */
+       baud = uart_get_baud_rate(uport, termios, old, 300, 4000000);
+ 
+@@ -1251,7 +1284,7 @@ static void qcom_geni_serial_set_termios(struct uart_port *uport,
+               dev_err(port->se.dev,
+                       "Couldn't find suitable clock rate for %u\n",
+                       baud * sampling_rate);
+-              goto out_restart_rx;
++              return;
+       }
+ 
+       dev_dbg(port->se.dev, "desired_rate = %u, clk_rate = %lu, clk_div = %u\n",
+@@ -1342,8 +1375,6 @@ static void qcom_geni_serial_set_termios(struct uart_port *uport,
+       writel(stop_bit_len, uport->membase + SE_UART_TX_STOP_BIT_LEN);
+       writel(ser_clk_cfg, uport->membase + GENI_SER_M_CLK_CFG);
+       writel(ser_clk_cfg, uport->membase + GENI_SER_S_CLK_CFG);
+-out_restart_rx:
+-      qcom_geni_serial_start_rx(uport);
+ }
+ 
+ #ifdef CONFIG_SERIAL_QCOM_GENI_CONSOLE
+@@ -1564,7 +1595,7 @@ static const struct uart_ops qcom_geni_console_pops = {
+ #ifdef CONFIG_CONSOLE_POLL
+       .poll_get_char  = qcom_geni_serial_get_char,
+       .poll_put_char  = qcom_geni_serial_poll_put_char,
+-      .poll_init = qcom_geni_serial_port_setup,
++      .poll_init = qcom_geni_serial_poll_init,
+ #endif
+       .pm = qcom_geni_serial_pm,
+ };
+@@ -1763,38 +1794,6 @@ static int qcom_geni_serial_sys_resume(struct device *dev)
+       return ret;
+ }
+ 
+-static int qcom_geni_serial_sys_hib_resume(struct device *dev)
+-{
+-      int ret = 0;
+-      struct uart_port *uport;
+-      struct qcom_geni_private_data *private_data;
+-      struct qcom_geni_serial_port *port = dev_get_drvdata(dev);
+-
+-      uport = &port->uport;
+-      private_data = uport->private_data;
+-
+-      if (uart_console(uport)) {
+-              geni_icc_set_tag(&port->se, QCOM_ICC_TAG_ALWAYS);
+-              geni_icc_set_bw(&port->se);
+-              ret = uart_resume_port(private_data->drv, uport);
+-              /*
+-               * For hibernation usecase clients for
+-               * console UART won't call port setup during restore,
+-               * hence call port setup for console uart.
+-               */
+-              qcom_geni_serial_port_setup(uport);
+-      } else {
+-              /*
+-               * Peripheral register settings are lost during hibernation.
+-               * Update setup flag such that port setup happens again
+-               * during next session. Clients of HS-UART will close and
+-               * open the port during hibernation.
+-               */
+-              port->setup = false;
+-      }
+-      return ret;
+-}
+-
+ static const struct qcom_geni_device_data qcom_geni_console_data = {
+       .console = true,
+       .mode = GENI_SE_FIFO,
+@@ -1806,12 +1805,8 @@ static const struct qcom_geni_device_data qcom_geni_uart_data = {
+ };
+ 
+ static const struct dev_pm_ops qcom_geni_serial_pm_ops = {
+-      .suspend = pm_sleep_ptr(qcom_geni_serial_sys_suspend),
+-      .resume = pm_sleep_ptr(qcom_geni_serial_sys_resume),
+-      .freeze = pm_sleep_ptr(qcom_geni_serial_sys_suspend),
+-      .poweroff = pm_sleep_ptr(qcom_geni_serial_sys_suspend),
+-      .restore = pm_sleep_ptr(qcom_geni_serial_sys_hib_resume),
+-      .thaw = pm_sleep_ptr(qcom_geni_serial_sys_hib_resume),
++      SYSTEM_SLEEP_PM_OPS(qcom_geni_serial_sys_suspend,
++                                      qcom_geni_serial_sys_resume)
+ };
+ 
+ static const struct of_device_id qcom_geni_serial_match_table[] = {
+diff --git a/drivers/tty/vt/vt.c b/drivers/tty/vt/vt.c
+index cd87e3d1291edc..96842ce817af47 100644
+--- a/drivers/tty/vt/vt.c
++++ b/drivers/tty/vt/vt.c
+@@ -4726,7 +4726,7 @@ static int con_font_get(struct vc_data *vc, struct console_font_op *op)
+               return -EINVAL;
+ 
+       if (op->data) {
+-              font.data = kvmalloc(max_font_size, GFP_KERNEL);
++              font.data = kvzalloc(max_font_size, GFP_KERNEL);
+               if (!font.data)
+                       return -ENOMEM;
+       } else
+diff --git a/drivers/ufs/core/ufs-mcq.c b/drivers/ufs/core/ufs-mcq.c
+index 5891cdacd0b3c5..3903947dbed1ca 100644
+--- a/drivers/ufs/core/ufs-mcq.c
++++ b/drivers/ufs/core/ufs-mcq.c
+@@ -539,7 +539,7 @@ int ufshcd_mcq_sq_cleanup(struct ufs_hba *hba, int task_tag)
+       struct scsi_cmnd *cmd = lrbp->cmd;
+       struct ufs_hw_queue *hwq;
+       void __iomem *reg, *opr_sqd_base;
+-      u32 nexus, id, val;
++      u32 nexus, id, val, rtc;
+       int err;
+ 
+       if (hba->quirks & UFSHCD_QUIRK_MCQ_BROKEN_RTC)
+@@ -569,17 +569,18 @@ int ufshcd_mcq_sq_cleanup(struct ufs_hba *hba, int task_tag)
+       opr_sqd_base = mcq_opr_base(hba, OPR_SQD, id);
+       writel(nexus, opr_sqd_base + REG_SQCTI);
+ 
+-      /* SQRTCy.ICU = 1 */
+-      writel(SQ_ICU, opr_sqd_base + REG_SQRTC);
++      /* Initiate Cleanup */
++      writel(readl(opr_sqd_base + REG_SQRTC) | SQ_ICU,
++              opr_sqd_base + REG_SQRTC);
+ 
+       /* Poll SQRTSy.CUS = 1. Return result from SQRTSy.RTC */
+       reg = opr_sqd_base + REG_SQRTS;
+       err = read_poll_timeout(readl, val, val & SQ_CUS, 20,
+                               MCQ_POLL_US, false, reg);
+-      if (err)
+-              dev_err(hba->dev, "%s: failed. hwq=%d, tag=%d err=%ld\n",
+-                      __func__, id, task_tag,
+-                      FIELD_GET(SQ_ICU_ERR_CODE_MASK, readl(reg)));
++      rtc = FIELD_GET(SQ_ICU_ERR_CODE_MASK, readl(reg));
++      if (err || rtc)
++              dev_err(hba->dev, "%s: failed. hwq=%d, tag=%d err=%d RTC=%d\n",
++                      __func__, id, task_tag, err, rtc);
+ 
+       if (ufshcd_mcq_sq_start(hba, hwq))
+               err = -ETIMEDOUT;
+diff --git a/drivers/ufs/core/ufshcd.c b/drivers/ufs/core/ufshcd.c
+index ce0620e804484a..09408642a6efba 100644
+--- a/drivers/ufs/core/ufshcd.c
++++ b/drivers/ufs/core/ufshcd.c
+@@ -5403,10 +5403,12 @@ ufshcd_transfer_rsp_status(struct ufs_hba *hba, struct ufshcd_lrb *lrbp,
+               }
+               break;
+       case OCS_ABORTED:
+-              result |= DID_ABORT << 16;
+-              break;
+       case OCS_INVALID_COMMAND_STATUS:
+               result |= DID_REQUEUE << 16;
++              dev_warn(hba->dev,
++                              "OCS %s from controller for tag %d\n",
++                              (ocs == OCS_ABORTED ? "aborted" : "invalid"),
++                              lrbp->task_tag);
+               break;
+       case OCS_INVALID_CMD_TABLE_ATTR:
+       case OCS_INVALID_PRDT_ATTR:
+@@ -6470,26 +6472,12 @@ static bool ufshcd_abort_one(struct request *rq, void *priv)
+       struct scsi_device *sdev = cmd->device;
+       struct Scsi_Host *shost = sdev->host;
+       struct ufs_hba *hba = shost_priv(shost);
+-      struct ufshcd_lrb *lrbp = &hba->lrb[tag];
+-      struct ufs_hw_queue *hwq;
+-      unsigned long flags;
+ 
+       *ret = ufshcd_try_to_abort_task(hba, tag);
+       dev_err(hba->dev, "Aborting tag %d / CDB %#02x %s\n", tag,
+               hba->lrb[tag].cmd ? hba->lrb[tag].cmd->cmnd[0] : -1,
+               *ret ? "failed" : "succeeded");
+ 
+-      /* Release cmd in MCQ mode if abort succeeds */
+-      if (hba->mcq_enabled && (*ret == 0)) {
+-              hwq = ufshcd_mcq_req_to_hwq(hba, scsi_cmd_to_rq(lrbp->cmd));
+-              if (!hwq)
+-                      return 0;
+-              spin_lock_irqsave(&hwq->cq_lock, flags);
+-              if (ufshcd_cmd_inflight(lrbp->cmd))
+-                      ufshcd_release_scsi_cmd(hba, lrbp);
+-              spin_unlock_irqrestore(&hwq->cq_lock, flags);
+-      }
+-
+       return *ret == 0;
+ }
+ 
+@@ -10214,7 +10202,9 @@ static void ufshcd_wl_shutdown(struct device *dev)
+       shost_for_each_device(sdev, hba->host) {
+               if (sdev == hba->ufs_device_wlun)
+                       continue;
+-              scsi_device_quiesce(sdev);
++              mutex_lock(&sdev->state_mutex);
++              scsi_device_set_state(sdev, SDEV_OFFLINE);
++              mutex_unlock(&sdev->state_mutex);
+       }
+       __ufshcd_wl_suspend(hba, UFS_SHUTDOWN_PM);
+ 
+diff --git a/drivers/usb/dwc3/core.c b/drivers/usb/dwc3/core.c
+index 21740e2b8f0781..427e5660f87c24 100644
+--- a/drivers/usb/dwc3/core.c
++++ b/drivers/usb/dwc3/core.c
+@@ -2342,6 +2342,11 @@ static int dwc3_suspend_common(struct dwc3 *dwc, pm_message_t msg)
+       u32 reg;
+       int i;
+ 
++      dwc->susphy_state = (dwc3_readl(dwc->regs, DWC3_GUSB2PHYCFG(0)) &
++                          DWC3_GUSB2PHYCFG_SUSPHY) ||
++                          (dwc3_readl(dwc->regs, DWC3_GUSB3PIPECTL(0)) &
++                          DWC3_GUSB3PIPECTL_SUSPHY);
++
+       switch (dwc->current_dr_role) {
+       case DWC3_GCTL_PRTCAP_DEVICE:
+               if (pm_runtime_suspended(dwc->dev))
+@@ -2393,6 +2398,15 @@ static int dwc3_suspend_common(struct dwc3 *dwc, pm_message_t msg)
+               break;
+       }
+ 
++      if (!PMSG_IS_AUTO(msg)) {
++              /*
++               * TI AM62 platform requires SUSPHY to be
++               * enabled for system suspend to work.
++               */
++              if (!dwc->susphy_state)
++                      dwc3_enable_susphy(dwc, true);
++      }
++
+       return 0;
+ }
+ 
+@@ -2460,6 +2474,11 @@ static int dwc3_resume_common(struct dwc3 *dwc, pm_message_t msg)
+               break;
+       }
+ 
++      if (!PMSG_IS_AUTO(msg)) {
++              /* restore SUSPHY state to that before system suspend. */
++              dwc3_enable_susphy(dwc, dwc->susphy_state);
++      }
++
+       return 0;
+ }
+ 
+diff --git a/drivers/usb/dwc3/core.h b/drivers/usb/dwc3/core.h
+index 9c508e0c5cdf54..eab81dfdcc3502 100644
+--- a/drivers/usb/dwc3/core.h
++++ b/drivers/usb/dwc3/core.h
+@@ -1150,6 +1150,8 @@ struct dwc3_scratchpad_array {
+  * @sys_wakeup: set if the device may do system wakeup.
+  * @wakeup_configured: set if the device is configured for remote wakeup.
+  * @suspended: set to track suspend event due to U3/L2.
++ * @susphy_state: state of DWC3_GUSB2PHYCFG_SUSPHY + DWC3_GUSB3PIPECTL_SUSPHY
++ *              before PM suspend.
+  * @imod_interval: set the interrupt moderation interval in 250ns
+  *                    increments or 0 to disable.
+  * @max_cfg_eps: current max number of IN eps used across all USB configs.
+@@ -1382,6 +1384,7 @@ struct dwc3 {
+       unsigned                sys_wakeup:1;
+       unsigned                wakeup_configured:1;
+       unsigned                suspended:1;
++      unsigned                susphy_state:1;
+ 
+       u16                     imod_interval;
+ 
+diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
+index 10178e5eda5a3f..4959c26d3b71b8 100644
+--- a/drivers/usb/dwc3/gadget.c
++++ b/drivers/usb/dwc3/gadget.c
+@@ -438,6 +438,10 @@ int dwc3_send_gadget_ep_cmd(struct dwc3_ep *dep, unsigned int cmd,
+                       dwc3_gadget_ep_get_transfer_index(dep);
+       }
+ 
++      if (DWC3_DEPCMD_CMD(cmd) == DWC3_DEPCMD_ENDTRANSFER &&
++          !(cmd & DWC3_DEPCMD_CMDIOC))
++              mdelay(1);
++
+       if (saved_config) {
+               reg = dwc3_readl(dwc->regs, DWC3_GUSB2PHYCFG(0));
+               reg |= saved_config;
+@@ -1715,12 +1719,10 @@ static int __dwc3_stop_active_transfer(struct dwc3_ep *dep, bool force, bool int
+       WARN_ON_ONCE(ret);
+       dep->resource_index = 0;
+ 
+-      if (!interrupt) {
+-              mdelay(1);
++      if (!interrupt)
+               dep->flags &= ~DWC3_EP_TRANSFER_STARTED;
+-      } else if (!ret) {
++      else if (!ret)
+               dep->flags |= DWC3_EP_END_TRANSFER_PENDING;
+-      }
+ 
+       dep->flags &= ~DWC3_EP_DELAY_STOP;
+       return ret;
+diff --git a/drivers/usb/gadget/function/f_uac2.c b/drivers/usb/gadget/function/f_uac2.c
+index 2d6d3286ffde2c..080dc512741889 100644
+--- a/drivers/usb/gadget/function/f_uac2.c
++++ b/drivers/usb/gadget/function/f_uac2.c
+@@ -2055,7 +2055,7 @@ static ssize_t f_uac2_opts_##name##_store(struct config_item *item,      \
+                                         const char *page, size_t len) \
+ {                                                                     \
+       struct f_uac2_opts *opts = to_f_uac2_opts(item);                \
+-      int ret = 0;                                                    \
++      int ret = len;                                                  \
+                                                                       \
+       mutex_lock(&opts->lock);                                        \
+       if (opts->refcnt) {                                             \
+@@ -2066,8 +2066,8 @@ static ssize_t f_uac2_opts_##name##_store(struct config_item *item,      \
+       if (len && page[len - 1] == '\n')                               \
+               len--;                                                  \
+                                                                       \
+-      ret = scnprintf(opts->name, min(sizeof(opts->name), len + 1),   \
+-                      "%s", page);                                    \
++      scnprintf(opts->name, min(sizeof(opts->name), len + 1),         \
++                "%s", page);                                          \
+                                                                       \
+ end:                                                                  \
+       mutex_unlock(&opts->lock);                                      \
+diff --git a/drivers/usb/gadget/udc/dummy_hcd.c b/drivers/usb/gadget/udc/dummy_hcd.c
+index ff7bee78bcc492..d5d89fadde433f 100644
+--- a/drivers/usb/gadget/udc/dummy_hcd.c
++++ b/drivers/usb/gadget/udc/dummy_hcd.c
+@@ -254,6 +254,7 @@ struct dummy_hcd {
+       u32                             stream_en_ep;
+       u8                              num_stream[30 / 2];
+ 
++      unsigned                        timer_pending:1;
+       unsigned                        active:1;
+       unsigned                        old_active:1;
+       unsigned                        resuming:1;
+@@ -1303,9 +1304,11 @@ static int dummy_urb_enqueue(
+               urb->error_count = 1;           /* mark as a new urb */
+ 
+       /* kick the scheduler, it'll do the rest */
+-      if (!hrtimer_active(&dum_hcd->timer))
++      if (!dum_hcd->timer_pending) {
++              dum_hcd->timer_pending = 1;
+               hrtimer_start(&dum_hcd->timer, ns_to_ktime(DUMMY_TIMER_INT_NSECS),
+                               HRTIMER_MODE_REL_SOFT);
++      }
+ 
+  done:
+       spin_unlock_irqrestore(&dum_hcd->dum->lock, flags);
+@@ -1324,9 +1327,10 @@ static int dummy_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
+       spin_lock_irqsave(&dum_hcd->dum->lock, flags);
+ 
+       rc = usb_hcd_check_unlink_urb(hcd, urb, status);
+-      if (!rc && dum_hcd->rh_state != DUMMY_RH_RUNNING &&
+-                      !list_empty(&dum_hcd->urbp_list))
++      if (rc == 0 && !dum_hcd->timer_pending) {
++              dum_hcd->timer_pending = 1;
+               hrtimer_start(&dum_hcd->timer, ns_to_ktime(0), HRTIMER_MODE_REL_SOFT);
++      }
+ 
+       spin_unlock_irqrestore(&dum_hcd->dum->lock, flags);
+       return rc;
+@@ -1813,6 +1817,7 @@ static enum hrtimer_restart dummy_timer(struct hrtimer *t)
+ 
+       /* look at each urb queued by the host side driver */
+       spin_lock_irqsave(&dum->lock, flags);
++      dum_hcd->timer_pending = 0;
+ 
+       if (!dum_hcd->udev) {
+               dev_err(dummy_dev(dum_hcd),
+@@ -1994,8 +1999,10 @@ static enum hrtimer_restart dummy_timer(struct hrtimer *t)
+       if (list_empty(&dum_hcd->urbp_list)) {
+               usb_put_dev(dum_hcd->udev);
+               dum_hcd->udev = NULL;
+-      } else if (dum_hcd->rh_state == DUMMY_RH_RUNNING) {
++      } else if (!dum_hcd->timer_pending &&
++                      dum_hcd->rh_state == DUMMY_RH_RUNNING) {
+               /* want a 1 msec delay here */
++              dum_hcd->timer_pending = 1;
+               hrtimer_start(&dum_hcd->timer, ns_to_ktime(DUMMY_TIMER_INT_NSECS),
+                               HRTIMER_MODE_REL_SOFT);
+       }
+@@ -2390,8 +2397,10 @@ static int dummy_bus_resume(struct usb_hcd *hcd)
+       } else {
+               dum_hcd->rh_state = DUMMY_RH_RUNNING;
+               set_link_state(dum_hcd);
+-              if (!list_empty(&dum_hcd->urbp_list))
++              if (!list_empty(&dum_hcd->urbp_list)) {
++                      dum_hcd->timer_pending = 1;
+                       hrtimer_start(&dum_hcd->timer, ns_to_ktime(0), HRTIMER_MODE_REL_SOFT);
++              }
+               hcd->state = HC_STATE_RUNNING;
+       }
+       spin_unlock_irq(&dum_hcd->dum->lock);
+@@ -2522,6 +2531,7 @@ static void dummy_stop(struct usb_hcd *hcd)
+       struct dummy_hcd        *dum_hcd = hcd_to_dummy_hcd(hcd);
+ 
+       hrtimer_cancel(&dum_hcd->timer);
++      dum_hcd->timer_pending = 0;
+       device_remove_file(dummy_dev(dum_hcd), &dev_attr_urbs);
+       dev_info(dummy_dev(dum_hcd), "stopped\n");
+ }
+diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
+index 90726899bc5bcc..785183f0b5f9de 100644
+--- a/drivers/usb/host/xhci-ring.c
++++ b/drivers/usb/host/xhci-ring.c
+@@ -1023,7 +1023,7 @@ static int xhci_invalidate_cancelled_tds(struct xhci_virt_ep *ep)
+                                       td_to_noop(xhci, ring, cached_td, false);
+                                       cached_td->cancel_status = TD_CLEARED;
+                               }
+-
++                              td_to_noop(xhci, ring, td, false);
+                               td->cancel_status = TD_CLEARING_CACHE;
+                               cached_td = td;
+                               break;
+diff --git a/drivers/usb/host/xhci-tegra.c b/drivers/usb/host/xhci-tegra.c
+index 6246d5ad146848..76f228e7443cb6 100644
+--- a/drivers/usb/host/xhci-tegra.c
++++ b/drivers/usb/host/xhci-tegra.c
+@@ -2183,7 +2183,7 @@ static int tegra_xusb_enter_elpg(struct tegra_xusb *tegra, bool runtime)
+               goto out;
+       }
+ 
+-      for (i = 0; i < tegra->num_usb_phys; i++) {
++      for (i = 0; i < xhci->usb2_rhub.num_ports; i++) {
+               if (!xhci->usb2_rhub.ports[i])
+                       continue;
+               portsc = readl(xhci->usb2_rhub.ports[i]->addr);
+diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
+index 0ea95ad4cb9022..856f16e64dcf05 100644
+--- a/drivers/usb/host/xhci.h
++++ b/drivers/usb/host/xhci.h
+@@ -1001,7 +1001,7 @@ enum xhci_setup_dev {
+ /* Set TR Dequeue Pointer command TRB fields, 6.4.3.9 */
+ #define TRB_TO_STREAM_ID(p)           ((((p) & (0xffff << 16)) >> 16))
+ #define STREAM_ID_FOR_TRB(p)          ((((p)) & 0xffff) << 16)
+-#define SCT_FOR_TRB(p)                        (((p) << 1) & 0x7)
++#define SCT_FOR_TRB(p)                        (((p) & 0x7) << 1)
+ 
+ /* Link TRB specific fields */
+ #define TRB_TC                        (1<<1)
+diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
+index 176f38750ad589..55886b64cadd83 100644
+--- a/drivers/usb/serial/option.c
++++ b/drivers/usb/serial/option.c
+@@ -279,6 +279,7 @@ static void option_instat_callback(struct urb *urb);
+ #define QUECTEL_PRODUCT_EG912Y                        0x6001
+ #define QUECTEL_PRODUCT_EC200S_CN             0x6002
+ #define QUECTEL_PRODUCT_EC200A                        0x6005
++#define QUECTEL_PRODUCT_EG916Q                        0x6007
+ #define QUECTEL_PRODUCT_EM061K_LWW            0x6008
+ #define QUECTEL_PRODUCT_EM061K_LCN            0x6009
+ #define QUECTEL_PRODUCT_EC200T                        0x6026
+@@ -1270,6 +1271,7 @@ static const struct usb_device_id option_ids[] = {
+       { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EC200S_CN, 0xff, 0, 0) },
+       { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EC200T, 0xff, 0, 0) },
+       { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EG912Y, 0xff, 0, 0) },
++      { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EG916Q, 0xff, 0x00, 0x00) },
+       { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_RM500K, 0xff, 0x00, 0x00) },
+ 
+       { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6001) },
+@@ -1380,10 +1382,16 @@ static const struct usb_device_id option_ids[] = {
+         .driver_info = NCTRL(0) | RSVD(1) },
+       { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x10a0, 0xff),    /* Telit FN20C04 (rmnet) */
+         .driver_info = RSVD(0) | NCTRL(3) },
++      { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x10a2, 0xff),    /* Telit FN920C04 (MBIM) */
++        .driver_info = NCTRL(4) },
+       { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x10a4, 0xff),    /* Telit FN20C04 (rmnet) */
+         .driver_info = RSVD(0) | NCTRL(3) },
++      { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x10a7, 0xff),    /* Telit FN920C04 (MBIM) */
++        .driver_info = NCTRL(4) },
+       { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x10a9, 0xff),    /* Telit FN20C04 (rmnet) */
+         .driver_info = RSVD(0) | NCTRL(2) | RSVD(3) | RSVD(4) },
++      { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x10aa, 0xff),    /* Telit FN920C04 (MBIM) */
++        .driver_info = NCTRL(3) | RSVD(4) | RSVD(5) },
+       { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_ME910),
+         .driver_info = NCTRL(0) | RSVD(1) | RSVD(3) },
+       { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_ME910_DUAL_MODEM),
+diff --git a/drivers/usb/typec/tcpm/qcom/qcom_pmic_typec_port.c b/drivers/usb/typec/tcpm/qcom/qcom_pmic_typec_port.c
+index a747baa2978498..c37dede62e12cd 100644
+--- a/drivers/usb/typec/tcpm/qcom/qcom_pmic_typec_port.c
++++ b/drivers/usb/typec/tcpm/qcom/qcom_pmic_typec_port.c
+@@ -432,7 +432,6 @@ static int qcom_pmic_typec_port_get_cc(struct tcpc_dev *tcpc,
+                       val = TYPEC_CC_RP_DEF;
+                       break;
+               }
+-              val = TYPEC_CC_RP_DEF;
+       }
+ 
+       if (misc & CC_ORIENTATION)
+diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c
+index f0cf8ce26f010f..a5098973bcef6e 100644
+--- a/fs/btrfs/tree-log.c
++++ b/fs/btrfs/tree-log.c
+@@ -1374,7 +1374,7 @@ static noinline int add_inode_ref(struct btrfs_trans_handle *trans,
+       struct inode *inode = NULL;
+       unsigned long ref_ptr;
+       unsigned long ref_end;
+-      struct fscrypt_str name;
++      struct fscrypt_str name = { 0 };
+       int ret;
+       int log_ref_ver = 0;
+       u64 parent_objectid;
+@@ -1845,7 +1845,7 @@ static noinline int replay_one_name(struct btrfs_trans_handle *trans,
+                                   struct btrfs_dir_item *di,
+                                   struct btrfs_key *key)
+ {
+-      struct fscrypt_str name;
++      struct fscrypt_str name = { 0 };
+       struct btrfs_dir_item *dir_dst_di;
+       struct btrfs_dir_item *index_dst_di;
+       bool dir_dst_matches = false;
+@@ -2125,7 +2125,7 @@ static noinline int check_item_in_log(struct btrfs_trans_handle *trans,
+       struct extent_buffer *eb;
+       int slot;
+       struct btrfs_dir_item *di;
+-      struct fscrypt_str name;
++      struct fscrypt_str name = { 0 };
+       struct inode *inode = NULL;
+       struct btrfs_key location;
+ 
+diff --git a/fs/fat/namei_vfat.c b/fs/fat/namei_vfat.c
+index 6423e1dedf1471..15bf32c21ac0db 100644
+--- a/fs/fat/namei_vfat.c
++++ b/fs/fat/namei_vfat.c
+@@ -1037,7 +1037,7 @@ static int vfat_rename(struct inode *old_dir, struct dentry *old_dentry,
+       if (corrupt < 0) {
+               fat_fs_error(new_dir->i_sb,
+                            "%s: Filesystem corrupted (i_pos %lld)",
+-                           __func__, sinfo.i_pos);
++                           __func__, new_i_pos);
+       }
+       goto out;
+ }
+diff --git a/fs/nilfs2/dir.c b/fs/nilfs2/dir.c
+index 4a29b0138d75f5..87f59b748b0ba6 100644
+--- a/fs/nilfs2/dir.c
++++ b/fs/nilfs2/dir.c
+@@ -323,7 +323,7 @@ static int nilfs_readdir(struct file *file, struct dir_context *ctx)
+  * The folio is mapped and unlocked.  When the caller is finished with
+  * the entry, it should call folio_release_kmap().
+  *
+- * On failure, returns NULL and the caller should ignore foliop.
++ * On failure, returns an error pointer and the caller should ignore foliop.
+  */
+ struct nilfs_dir_entry *nilfs_find_entry(struct inode *dir,
+               const struct qstr *qstr, struct folio **foliop)
+@@ -346,22 +346,24 @@ struct nilfs_dir_entry *nilfs_find_entry(struct inode *dir,
+       do {
+               char *kaddr = nilfs_get_folio(dir, n, foliop);
+ 
+-              if (!IS_ERR(kaddr)) {
+-                      de = (struct nilfs_dir_entry *)kaddr;
+-                      kaddr += nilfs_last_byte(dir, n) - reclen;
+-                      while ((char *) de <= kaddr) {
+-                              if (de->rec_len == 0) {
+-                                      nilfs_error(dir->i_sb,
+-                                              "zero-length directory entry");
+-                                      folio_release_kmap(*foliop, kaddr);
+-                                      goto out;
+-                              }
+-                              if (nilfs_match(namelen, name, de))
+-                                      goto found;
+-                              de = nilfs_next_entry(de);
++              if (IS_ERR(kaddr))
++                      return ERR_CAST(kaddr);
++
++              de = (struct nilfs_dir_entry *)kaddr;
++              kaddr += nilfs_last_byte(dir, n) - reclen;
++              while ((char *)de <= kaddr) {
++                      if (de->rec_len == 0) {
++                              nilfs_error(dir->i_sb,
++                                          "zero-length directory entry");
++                              folio_release_kmap(*foliop, kaddr);
++                              goto out;
+                       }
+-                      folio_release_kmap(*foliop, kaddr);
++                      if (nilfs_match(namelen, name, de))
++                              goto found;
++                      de = nilfs_next_entry(de);
+               }
++              folio_release_kmap(*foliop, kaddr);
++
+               if (++n >= npages)
+                       n = 0;
+               /* next folio is past the blocks we've got */
+@@ -374,7 +376,7 @@ struct nilfs_dir_entry *nilfs_find_entry(struct inode *dir,
+               }
+       } while (n != start);
+ out:
+-      return NULL;
++      return ERR_PTR(-ENOENT);
+ 
+ found:
+       ei->i_dir_start_lookup = n;
+@@ -418,18 +420,18 @@ struct nilfs_dir_entry *nilfs_dotdot(struct inode *dir, struct folio **foliop)
+       return NULL;
+ }
+ 
+-ino_t nilfs_inode_by_name(struct inode *dir, const struct qstr *qstr)
++int nilfs_inode_by_name(struct inode *dir, const struct qstr *qstr, ino_t *ino)
+ {
+-      ino_t res = 0;
+       struct nilfs_dir_entry *de;
+       struct folio *folio;
+ 
+       de = nilfs_find_entry(dir, qstr, &folio);
+-      if (de) {
+-              res = le64_to_cpu(de->inode);
+-              folio_release_kmap(folio, de);
+-      }
+-      return res;
++      if (IS_ERR(de))
++              return PTR_ERR(de);
++
++      *ino = le64_to_cpu(de->inode);
++      folio_release_kmap(folio, de);
++      return 0;
+ }
+ 
+ void nilfs_set_link(struct inode *dir, struct nilfs_dir_entry *de,
+diff --git a/fs/nilfs2/namei.c b/fs/nilfs2/namei.c
+index c950139db6ef0d..4905063790c578 100644
+--- a/fs/nilfs2/namei.c
++++ b/fs/nilfs2/namei.c
+@@ -55,12 +55,20 @@ nilfs_lookup(struct inode *dir, struct dentry *dentry, unsigned int flags)
+ {
+       struct inode *inode;
+       ino_t ino;
++      int res;
+ 
+       if (dentry->d_name.len > NILFS_NAME_LEN)
+               return ERR_PTR(-ENAMETOOLONG);
+ 
+-      ino = nilfs_inode_by_name(dir, &dentry->d_name);
+-      inode = ino ? nilfs_iget(dir->i_sb, NILFS_I(dir)->i_root, ino) : NULL;
++      res = nilfs_inode_by_name(dir, &dentry->d_name, &ino);
++      if (res) {
++              if (res != -ENOENT)
++                      return ERR_PTR(res);
++              inode = NULL;
++      } else {
++              inode = nilfs_iget(dir->i_sb, NILFS_I(dir)->i_root, ino);
++      }
++
+       return d_splice_alias(inode, dentry);
+ }
+ 
+@@ -263,10 +271,11 @@ static int nilfs_do_unlink(struct inode *dir, struct dentry *dentry)
+       struct folio *folio;
+       int err;
+ 
+-      err = -ENOENT;
+       de = nilfs_find_entry(dir, &dentry->d_name, &folio);
+-      if (!de)
++      if (IS_ERR(de)) {
++              err = PTR_ERR(de);
+               goto out;
++      }
+ 
+       inode = d_inode(dentry);
+       err = -EIO;
+@@ -362,10 +371,11 @@ static int nilfs_rename(struct mnt_idmap *idmap,
+       if (unlikely(err))
+               return err;
+ 
+-      err = -ENOENT;
+       old_de = nilfs_find_entry(old_dir, &old_dentry->d_name, &old_folio);
+-      if (!old_de)
++      if (IS_ERR(old_de)) {
++              err = PTR_ERR(old_de);
+               goto out;
++      }
+ 
+       if (S_ISDIR(old_inode->i_mode)) {
+               err = -EIO;
+@@ -382,10 +392,12 @@ static int nilfs_rename(struct mnt_idmap *idmap,
+               if (dir_de && !nilfs_empty_dir(new_inode))
+                       goto out_dir;
+ 
+-              err = -ENOENT;
+-              new_de = nilfs_find_entry(new_dir, &new_dentry->d_name, &new_folio);
+-              if (!new_de)
++              new_de = nilfs_find_entry(new_dir, &new_dentry->d_name,
++                                        &new_folio);
++              if (IS_ERR(new_de)) {
++                      err = PTR_ERR(new_de);
+                       goto out_dir;
++              }
+               nilfs_set_link(new_dir, new_de, new_folio, old_inode);
+               folio_release_kmap(new_folio, new_de);
+               nilfs_mark_inode_dirty(new_dir);
+@@ -440,12 +452,13 @@ static int nilfs_rename(struct mnt_idmap *idmap,
+  */
+ static struct dentry *nilfs_get_parent(struct dentry *child)
+ {
+-      unsigned long ino;
++      ino_t ino;
++      int res;
+       struct nilfs_root *root;
+ 
+-      ino = nilfs_inode_by_name(d_inode(child), &dotdot_name);
+-      if (!ino)
+-              return ERR_PTR(-ENOENT);
++      res = nilfs_inode_by_name(d_inode(child), &dotdot_name, &ino);
++      if (res)
++              return ERR_PTR(res);
+ 
+       root = NILFS_I(d_inode(child))->i_root;
+ 
+diff --git a/fs/nilfs2/nilfs.h b/fs/nilfs2/nilfs.h
+index 4017f78564405a..0a80dc39a3aa46 100644
+--- a/fs/nilfs2/nilfs.h
++++ b/fs/nilfs2/nilfs.h
+@@ -233,7 +233,7 @@ static inline __u32 nilfs_mask_flags(umode_t mode, __u32 flags)
+ 
+ /* dir.c */
+ int nilfs_add_link(struct dentry *, struct inode *);
+-ino_t nilfs_inode_by_name(struct inode *, const struct qstr *);
++int nilfs_inode_by_name(struct inode *dir, const struct qstr *qstr, ino_t *ino);
+ int nilfs_make_empty(struct inode *, struct inode *);
+ struct nilfs_dir_entry *nilfs_find_entry(struct inode *, const struct qstr *,
+               struct folio **);
+diff --git a/fs/smb/server/mgmt/user_session.c b/fs/smb/server/mgmt/user_session.c
+index 99416ce9f50183..1e4624e9d434ab 100644
+--- a/fs/smb/server/mgmt/user_session.c
++++ b/fs/smb/server/mgmt/user_session.c
+@@ -177,9 +177,10 @@ static void ksmbd_expire_session(struct ksmbd_conn *conn)
+ 
+       down_write(&conn->session_lock);
+       xa_for_each(&conn->sessions, id, sess) {
+-              if (sess->state != SMB2_SESSION_VALID ||
+-                  time_after(jiffies,
+-                             sess->last_active + SMB2_SESSION_TIMEOUT)) {
++              if (atomic_read(&sess->refcnt) == 0 &&
++                  (sess->state != SMB2_SESSION_VALID ||
++                   time_after(jiffies,
++                             sess->last_active + SMB2_SESSION_TIMEOUT))) {
+                       xa_erase(&conn->sessions, sess->id);
+                       hash_del(&sess->hlist);
+                       ksmbd_session_destroy(sess);
+@@ -269,8 +270,6 @@ struct ksmbd_session *ksmbd_session_lookup_slowpath(unsigned long long id)
+ 
+       down_read(&sessions_table_lock);
+       sess = __session_lookup(id);
+-      if (sess)
+-              sess->last_active = jiffies;
+       up_read(&sessions_table_lock);
+ 
+       return sess;
+@@ -289,6 +288,22 @@ struct ksmbd_session *ksmbd_session_lookup_all(struct ksmbd_conn *conn,
+       return sess;
+ }
+ 
++void ksmbd_user_session_get(struct ksmbd_session *sess)
++{
++      atomic_inc(&sess->refcnt);
++}
++
++void ksmbd_user_session_put(struct ksmbd_session *sess)
++{
++      if (!sess)
++              return;
++
++      if (atomic_read(&sess->refcnt) <= 0)
++              WARN_ON(1);
++      else
++              atomic_dec(&sess->refcnt);
++}
++
+ struct preauth_session *ksmbd_preauth_session_alloc(struct ksmbd_conn *conn,
+                                                   u64 sess_id)
+ {
+@@ -393,6 +408,7 @@ static struct ksmbd_session *__session_create(int protocol)
+       xa_init(&sess->rpc_handle_list);
+       sess->sequence_number = 1;
+       rwlock_init(&sess->tree_conns_lock);
++      atomic_set(&sess->refcnt, 1);
+ 
+       ret = __init_smb2_session(sess);
+       if (ret)
+diff --git a/fs/smb/server/mgmt/user_session.h b/fs/smb/server/mgmt/user_session.h
+index dc9fded2cd4379..c1c4b20bd5c6cf 100644
+--- a/fs/smb/server/mgmt/user_session.h
++++ b/fs/smb/server/mgmt/user_session.h
+@@ -61,6 +61,8 @@ struct ksmbd_session {
+       struct ksmbd_file_table         file_table;
+       unsigned long                   last_active;
+       rwlock_t                        tree_conns_lock;
++
++      atomic_t                        refcnt;
+ };
+ 
+ static inline int test_session_flag(struct ksmbd_session *sess, int bit)
+@@ -104,4 +106,6 @@ void ksmbd_release_tree_conn_id(struct ksmbd_session *sess, int id);
+ int ksmbd_session_rpc_open(struct ksmbd_session *sess, char *rpc_name);
+ void ksmbd_session_rpc_close(struct ksmbd_session *sess, int id);
+ int ksmbd_session_rpc_method(struct ksmbd_session *sess, int id);
++void ksmbd_user_session_get(struct ksmbd_session *sess);
++void ksmbd_user_session_put(struct ksmbd_session *sess);
+ #endif /* __USER_SESSION_MANAGEMENT_H__ */
+diff --git a/fs/smb/server/server.c b/fs/smb/server/server.c
+index 4d24cc105ef6b5..bb3e7b09201a88 100644
+--- a/fs/smb/server/server.c
++++ b/fs/smb/server/server.c
+@@ -238,6 +238,8 @@ static void __handle_ksmbd_work(struct ksmbd_work *work,
+       } while (is_chained == true);
+ 
+ send:
++      if (work->sess)
++              ksmbd_user_session_put(work->sess);
+       if (work->tcon)
+               ksmbd_tree_connect_put(work->tcon);
+       smb3_preauth_hash_rsp(work);
+diff --git a/fs/smb/server/smb2pdu.c b/fs/smb/server/smb2pdu.c
+index 065adfb985fe2a..72e0880617ebc6 100644
+--- a/fs/smb/server/smb2pdu.c
++++ b/fs/smb/server/smb2pdu.c
+@@ -605,8 +605,10 @@ int smb2_check_user_session(struct ksmbd_work *work)
+ 
+       /* Check for validity of user session */
+       work->sess = ksmbd_session_lookup_all(conn, sess_id);
+-      if (work->sess)
++      if (work->sess) {
++              ksmbd_user_session_get(work->sess);
+               return 1;
++      }
+       ksmbd_debug(SMB, "Invalid user session, Uid %llu\n", sess_id);
+       return -ENOENT;
+ }
+@@ -1746,6 +1748,7 @@ int smb2_sess_setup(struct ksmbd_work *work)
+               }
+ 
+               conn->binding = true;
++              ksmbd_user_session_get(sess);
+       } else if ((conn->dialect < SMB30_PROT_ID ||
+                   server_conf.flags & KSMBD_GLOBAL_FLAG_SMB3_MULTICHANNEL) &&
+                  (req->Flags & SMB2_SESSION_REQ_FLAG_BINDING)) {
+@@ -1772,6 +1775,7 @@ int smb2_sess_setup(struct ksmbd_work *work)
+               }
+ 
+               conn->binding = false;
++              ksmbd_user_session_get(sess);
+       }
+       work->sess = sess;
+ 
+@@ -2232,7 +2236,9 @@ int smb2_session_logoff(struct ksmbd_work *work)
+       }
+ 
+       ksmbd_destroy_file_table(&sess->file_table);
++      down_write(&conn->session_lock);
+       sess->state = SMB2_SESSION_EXPIRED;
++      up_write(&conn->session_lock);
+ 
+       ksmbd_free_user(sess->user);
+       sess->user = NULL;
+diff --git a/include/linux/fsl/enetc_mdio.h b/include/linux/fsl/enetc_mdio.h
+index df25fffdc0ae71..623ccfcbf39c35 100644
+--- a/include/linux/fsl/enetc_mdio.h
++++ b/include/linux/fsl/enetc_mdio.h
+@@ -59,7 +59,8 @@ static inline int enetc_mdio_read_c45(struct mii_bus *bus, int phy_id,
+ static inline int enetc_mdio_write_c45(struct mii_bus *bus, int phy_id,
+                                      int devad, int regnum, u16 value)
+ { return -EINVAL; }
+-struct enetc_hw *enetc_hw_alloc(struct device *dev, void __iomem *port_regs)
++static inline struct enetc_hw *enetc_hw_alloc(struct device *dev,
++                                            void __iomem *port_regs)
+ { return ERR_PTR(-EINVAL); }
+ 
+ #endif
+diff --git a/include/linux/irqchip/arm-gic-v4.h b/include/linux/irqchip/arm-gic-v4.h
+index ecabed6d330752..7f1f11a5e4e44b 100644
+--- a/include/linux/irqchip/arm-gic-v4.h
++++ b/include/linux/irqchip/arm-gic-v4.h
+@@ -66,10 +66,12 @@ struct its_vpe {
+                               bool    enabled;
+                               bool    group;
+                       }                       sgi_config[16];
+-                      atomic_t vmapp_count;
+               };
+       };
+ 
++      /* Track the VPE being mapped */
++      atomic_t vmapp_count;
++
+       /*
+        * Ensures mutual exclusion between affinity setting of the
+        * vPE and vLPI operations using vpe->col_idx.
+diff --git a/include/trace/events/huge_memory.h b/include/trace/events/huge_memory.h
+index b5f5369b63009f..9d5c00b0285c34 100644
+--- a/include/trace/events/huge_memory.h
++++ b/include/trace/events/huge_memory.h
+@@ -208,7 +208,7 @@ TRACE_EVENT(mm_khugepaged_scan_file,
+ 
+ TRACE_EVENT(mm_khugepaged_collapse_file,
+       TP_PROTO(struct mm_struct *mm, struct folio *new_folio, pgoff_t index,
+-                      bool is_shmem, unsigned long addr, struct file *file,
++                      unsigned long addr, bool is_shmem, struct file *file,
+                       int nr, int result),
+       TP_ARGS(mm, new_folio, index, addr, is_shmem, file, nr, result),
+       TP_STRUCT__entry(
+@@ -233,7 +233,7 @@ TRACE_EVENT(mm_khugepaged_collapse_file,
+               __entry->result = result;
+       ),
+ 
+-      TP_printk("mm=%p, hpage_pfn=0x%lx, index=%ld, addr=%ld, is_shmem=%d, filename=%s, nr=%d, result=%s",
++      TP_printk("mm=%p, hpage_pfn=0x%lx, index=%ld, addr=%lx, is_shmem=%d, filename=%s, nr=%d, result=%s",
+               __entry->mm,
+               __entry->hpfn,
+               __entry->index,
+diff --git a/include/uapi/linux/ublk_cmd.h b/include/uapi/linux/ublk_cmd.h
+index c8dc5f8ea69962..12873639ea9644 100644
+--- a/include/uapi/linux/ublk_cmd.h
++++ b/include/uapi/linux/ublk_cmd.h
+@@ -175,7 +175,13 @@
+ /* use ioctl encoding for uring command */
+ #define UBLK_F_CMD_IOCTL_ENCODE       (1UL << 6)
+ 
+-/* Copy between request and user buffer by pread()/pwrite() */
++/*
++ *  Copy between request and user buffer by pread()/pwrite()
++ *
++ *  Not available for UBLK_F_UNPRIVILEGED_DEV, otherwise userspace may
++ *  deceive us by not filling request buffer, then kernel uninitialized
++ *  data may be leaked.
++ */
+ #define UBLK_F_USER_COPY      (1UL << 7)
+ 
+ /*
+diff --git a/io_uring/io_uring.h b/io_uring/io_uring.h
+index c2acf6180845db..a8ee7287ac9f2d 100644
+--- a/io_uring/io_uring.h
++++ b/io_uring/io_uring.h
+@@ -279,7 +279,14 @@ static inline bool io_sqring_full(struct io_ring_ctx *ctx)
+ {
+       struct io_rings *r = ctx->rings;
+ 
+-      return READ_ONCE(r->sq.tail) - ctx->cached_sq_head == ctx->sq_entries;
++      /*
++       * SQPOLL must use the actual sqring head, as using the cached_sq_head
++       * is race prone if the SQPOLL thread has grabbed entries but not yet
++       * committed them to the ring. For !SQPOLL, this doesn't matter, but
++       * since this helper is just used for SQPOLL sqring waits (or POLLOUT),
++       * just read the actual sqring head unconditionally.
++       */
++      return READ_ONCE(r->sq.tail) - READ_ONCE(r->sq.head) == ctx->sq_entries;
+ }
+ 
+ static inline unsigned int io_sqring_entries(struct io_ring_ctx *ctx)
+@@ -315,6 +322,7 @@ static inline int io_run_task_work(void)
+               if (current->io_uring) {
+                       unsigned int count = 0;
+ 
++                      __set_current_state(TASK_RUNNING);
+                       tctx_task_work_run(current->io_uring, UINT_MAX, &count);
+                       if (count)
+                               ret = true;
+diff --git a/kernel/time/posix-clock.c b/kernel/time/posix-clock.c
+index 4782edcbe7b9b4..6b5d5c0021fae6 100644
+--- a/kernel/time/posix-clock.c
++++ b/kernel/time/posix-clock.c
+@@ -319,6 +319,9 @@ static int pc_clock_settime(clockid_t id, const struct timespec64 *ts)
+               goto out;
+       }
+ 
++      if (!timespec64_valid_strict(ts))
++              return -EINVAL;
++
+       if (cd.clk->ops.clock_settime)
+               err = cd.clk->ops.clock_settime(cd.clk, ts);
+       else
+diff --git a/kernel/trace/fgraph.c b/kernel/trace/fgraph.c
+index d7d4fb403f6f0f..43f4e3f57438b4 100644
+--- a/kernel/trace/fgraph.c
++++ b/kernel/trace/fgraph.c
+@@ -1160,19 +1160,13 @@ void fgraph_update_pid_func(void)
+ static int start_graph_tracing(void)
+ {
+       unsigned long **ret_stack_list;
+-      int ret, cpu;
++      int ret;
+ 
+       ret_stack_list = kmalloc(SHADOW_STACK_SIZE, GFP_KERNEL);
+ 
+       if (!ret_stack_list)
+               return -ENOMEM;
+ 
+-      /* The cpu_boot init_task->ret_stack will never be freed */
+-      for_each_online_cpu(cpu) {
+-              if (!idle_task(cpu)->ret_stack)
+-                      ftrace_graph_init_idle_task(idle_task(cpu), cpu);
+-      }
+-
+       do {
+               ret = alloc_retstack_tasklist(ret_stack_list);
+       } while (ret == -EAGAIN);
+@@ -1242,14 +1236,34 @@ static void ftrace_graph_disable_direct(bool disable_branch)
+       fgraph_direct_gops = &fgraph_stub;
+ }
+ 
++/* The cpu_boot init_task->ret_stack will never be freed */
++static int fgraph_cpu_init(unsigned int cpu)
++{
++      if (!idle_task(cpu)->ret_stack)
++              ftrace_graph_init_idle_task(idle_task(cpu), cpu);
++      return 0;
++}
++
+ int register_ftrace_graph(struct fgraph_ops *gops)
+ {
++      static bool fgraph_initialized;
+       int command = 0;
+       int ret = 0;
+       int i = -1;
+ 
+       mutex_lock(&ftrace_lock);
+ 
++      if (!fgraph_initialized) {
++              ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "fgraph_idle_init",
++                                      fgraph_cpu_init, NULL);
++              if (ret < 0) {
++                      pr_warn("fgraph: Error to init cpu hotplug support\n");
++                      return ret;
++              }
++              fgraph_initialized = true;
++              ret = 0;
++      }
++
+       if (!fgraph_array[0]) {
+               /* The array must always have real data on it */
+               for (i = 0; i < FGRAPH_ARRAY_SIZE; i++)
+diff --git a/lib/maple_tree.c b/lib/maple_tree.c
+index 6df3a8b95808ab..20f6f7ae937272 100644
+--- a/lib/maple_tree.c
++++ b/lib/maple_tree.c
+@@ -2196,6 +2196,8 @@ static inline void mas_node_or_none(struct ma_state *mas,
+ 
+ /*
+  * mas_wr_node_walk() - Find the correct offset for the index in the @mas.
++ *                      If @mas->index cannot be found within the containing
++ *                      node, we traverse to the last entry in the node.
+  * @wr_mas: The maple write state
+  *
+  * Uses mas_slot_locked() and does not need to worry about dead nodes.
+@@ -3609,7 +3611,7 @@ static bool mas_wr_walk(struct ma_wr_state *wr_mas)
+       return true;
+ }
+ 
+-static bool mas_wr_walk_index(struct ma_wr_state *wr_mas)
++static void mas_wr_walk_index(struct ma_wr_state *wr_mas)
+ {
+       struct ma_state *mas = wr_mas->mas;
+ 
+@@ -3618,11 +3620,9 @@ static bool mas_wr_walk_index(struct ma_wr_state *wr_mas)
+               wr_mas->content = mas_slot_locked(mas, wr_mas->slots,
+                                                 mas->offset);
+               if (ma_is_leaf(wr_mas->type))
+-                      return true;
++                      return;
+               mas_wr_walk_traverse(wr_mas);
+-
+       }
+-      return true;
+ }
+ /*
+  * mas_extend_spanning_null() - Extend a store of a %NULL to include surrounding %NULLs.
+@@ -3853,8 +3853,8 @@ static inline int mas_wr_spanning_store(struct ma_wr_state *wr_mas)
+       memset(&b_node, 0, sizeof(struct maple_big_node));
+       /* Copy l_mas and store the value in b_node. */
+       mas_store_b_node(&l_wr_mas, &b_node, l_mas.end);
+-      /* Copy r_mas into b_node. */
+-      if (r_mas.offset <= r_mas.end)
++      /* Copy r_mas into b_node if there is anything to copy. */
++      if (r_mas.max > r_mas.last)
+               mas_mab_cp(&r_mas, r_mas.offset, r_mas.end,
+                          &b_node, b_node.b_end + 1);
+       else
+diff --git a/mm/damon/sysfs-test.h b/mm/damon/sysfs-test.h
+index 1c9b596057a7cc..7b5c7b307da99c 100644
+--- a/mm/damon/sysfs-test.h
++++ b/mm/damon/sysfs-test.h
+@@ -67,6 +67,7 @@ static void damon_sysfs_test_add_targets(struct kunit *test)
+       damon_destroy_ctx(ctx);
+       kfree(sysfs_targets->targets_arr);
+       kfree(sysfs_targets);
++      kfree(sysfs_target->regions);
+       kfree(sysfs_target);
+ }
+ 
+diff --git a/mm/khugepaged.c b/mm/khugepaged.c
+index cdd1d8655a76bb..4cba91ecf74b89 100644
+--- a/mm/khugepaged.c
++++ b/mm/khugepaged.c
+@@ -2219,7 +2219,7 @@ static int collapse_file(struct mm_struct *mm, unsigned long addr,
+       folio_put(new_folio);
+ out:
+       VM_BUG_ON(!list_empty(&pagelist));
+-      trace_mm_khugepaged_collapse_file(mm, new_folio, index, is_shmem, addr, file, HPAGE_PMD_NR, result);
++      trace_mm_khugepaged_collapse_file(mm, new_folio, index, addr, is_shmem, file, HPAGE_PMD_NR, result);
+       return result;
+ }
+ 
+diff --git a/mm/mremap.c b/mm/mremap.c
+index e7ae140fc6409b..3ca167d84c5655 100644
+--- a/mm/mremap.c
++++ b/mm/mremap.c
+@@ -238,6 +238,7 @@ static bool move_normal_pmd(struct vm_area_struct *vma, unsigned long old_addr,
+ {
+       spinlock_t *old_ptl, *new_ptl;
+       struct mm_struct *mm = vma->vm_mm;
++      bool res = false;
+       pmd_t pmd;
+ 
+       if (!arch_supports_page_table_move())
+@@ -277,19 +278,25 @@ static bool move_normal_pmd(struct vm_area_struct *vma, unsigned long old_addr,
+       if (new_ptl != old_ptl)
+               spin_lock_nested(new_ptl, SINGLE_DEPTH_NESTING);
+ 
+-      /* Clear the pmd */
+       pmd = *old_pmd;
++
++      /* Racing with collapse? */
++      if (unlikely(!pmd_present(pmd) || pmd_leaf(pmd)))
++              goto out_unlock;
++      /* Clear the pmd */
+       pmd_clear(old_pmd);
++      res = true;
+ 
+       VM_BUG_ON(!pmd_none(*new_pmd));
+ 
+       pmd_populate(mm, new_pmd, pmd_pgtable(pmd));
+       flush_tlb_range(vma, old_addr, old_addr + PMD_SIZE);
++out_unlock:
+       if (new_ptl != old_ptl)
+               spin_unlock(new_ptl);
+       spin_unlock(old_ptl);
+ 
+-      return true;
++      return res;
+ }
+ #else
+ static inline bool move_normal_pmd(struct vm_area_struct *vma,
+diff --git a/mm/swapfile.c b/mm/swapfile.c
+index 38bdc439651acf..478ba2f7c2eefd 100644
+--- a/mm/swapfile.c
++++ b/mm/swapfile.c
+@@ -2106,7 +2106,7 @@ static int unuse_mm(struct mm_struct *mm, unsigned int type)
+ 
+       mmap_read_lock(mm);
+       for_each_vma(vmi, vma) {
+-              if (vma->anon_vma) {
++              if (vma->anon_vma && !is_vm_hugetlb_page(vma)) {
+                       ret = unuse_vma(vma, type);
+                       if (ret)
+                               break;
+diff --git a/mm/vmscan.c b/mm/vmscan.c
+index bd489c1af22893..128f307da6eeac 100644
+--- a/mm/vmscan.c
++++ b/mm/vmscan.c
+@@ -4300,7 +4300,7 @@ static bool sort_folio(struct lruvec *lruvec, struct folio *folio, struct scan_c
+       }
+ 
+       /* ineligible */
+-      if (zone > sc->reclaim_idx) {
++      if (!folio_test_lru(folio) || zone > sc->reclaim_idx) {
+               gen = folio_inc_gen(lruvec, folio, false);
+               list_move_tail(&folio->lru, &lrugen->folios[gen][type][zone]);
+               return true;
+@@ -4940,8 +4940,8 @@ static void lru_gen_shrink_node(struct pglist_data *pgdat, struct scan_control *
+ 
+       blk_finish_plug(&plug);
+ done:
+-      /* kswapd should never fail */
+-      pgdat->kswapd_failures = 0;
++      if (sc->nr_reclaimed > reclaimed)
++              pgdat->kswapd_failures = 0;
+ }
+ 
+ 
+ /******************************************************************************
+diff --git a/net/bluetooth/af_bluetooth.c b/net/bluetooth/af_bluetooth.c
+index 67604ccec2f427..e39fba5565c5d4 100644
+--- a/net/bluetooth/af_bluetooth.c
++++ b/net/bluetooth/af_bluetooth.c
+@@ -825,11 +825,14 @@ static int __init bt_init(void)
+       bt_sysfs_cleanup();
+ cleanup_led:
+       bt_leds_cleanup();
++      debugfs_remove_recursive(bt_debugfs);
+       return err;
+ }
+ 
+ static void __exit bt_exit(void)
+ {
++      iso_exit();
++
+       mgmt_exit();
+ 
+       sco_exit();
+diff --git a/net/bluetooth/iso.c b/net/bluetooth/iso.c
+index d5e00d0dd1a04b..c9eefb43bf47e3 100644
+--- a/net/bluetooth/iso.c
++++ b/net/bluetooth/iso.c
+@@ -2301,13 +2301,9 @@ int iso_init(void)
+ 
+       hci_register_cb(&iso_cb);
+ 
+-      if (IS_ERR_OR_NULL(bt_debugfs))
+-              return 0;
+-
+-      if (!iso_debugfs) {
++      if (!IS_ERR_OR_NULL(bt_debugfs))
+               iso_debugfs = debugfs_create_file("iso", 0444, bt_debugfs,
+                                                 NULL, &iso_debugfs_fops);
+-      }
+ 
+       iso_inited = true;
+ 
+diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
+index 16c48df8df4cc8..8f67eea34779e1 100644
+--- a/net/ipv4/tcp_output.c
++++ b/net/ipv4/tcp_output.c
+@@ -2342,9 +2342,7 @@ static bool tcp_can_coalesce_send_queue_head(struct sock *sk, int len)
+               if (len <= skb->len)
+                       break;
+ 
+-              if (unlikely(TCP_SKB_CB(skb)->eor) ||
+-                  tcp_has_tx_tstamp(skb) ||
+-                  !skb_pure_zcopy_same(skb, next))
++              if (tcp_has_tx_tstamp(skb) || !tcp_skb_can_collapse(skb, next))
+                       return false;
+ 
+               len -= skb->len;
+diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
+index 49c622e743e87f..2a82ed7f7d9d2e 100644
+--- a/net/ipv4/udp.c
++++ b/net/ipv4/udp.c
+@@ -950,8 +950,10 @@ static int udp_send_skb(struct sk_buff *skb, struct flowi4 *fl4,
+                       skb_shinfo(skb)->gso_type = SKB_GSO_UDP_L4;
+                       skb_shinfo(skb)->gso_segs = DIV_ROUND_UP(datalen,
+                                                                cork->gso_size);
++
++                      /* Don't checksum the payload, skb will get segmented */
++                      goto csum_partial;
+               }
+-              goto csum_partial;
+       }
+ 
+       if (is_udplite)                                  /*     UDP-Lite      */
+diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
+index 6602a2e9cdb532..a10181ac8e9806 100644
+--- a/net/ipv6/udp.c
++++ b/net/ipv6/udp.c
+@@ -1266,8 +1266,10 @@ static int udp_v6_send_skb(struct sk_buff *skb, struct flowi6 *fl6,
+                       skb_shinfo(skb)->gso_type = SKB_GSO_UDP_L4;
+                       skb_shinfo(skb)->gso_segs = DIV_ROUND_UP(datalen,
+                                                                cork->gso_size);
++
++                      /* Don't checksum the payload, skb will get segmented */
++                      goto csum_partial;
+               }
+-              goto csum_partial;
+       }
+ 
+       if (is_udplite)
+diff --git a/net/mptcp/mib.c b/net/mptcp/mib.c
+index 79f2278434b950..3dc49c3169f20e 100644
+--- a/net/mptcp/mib.c
++++ b/net/mptcp/mib.c
+@@ -15,6 +15,7 @@ static const struct snmp_mib mptcp_snmp_list[] = {
+       SNMP_MIB_ITEM("MPCapableACKRX", MPTCP_MIB_MPCAPABLEPASSIVEACK),
+       SNMP_MIB_ITEM("MPCapableFallbackACK", MPTCP_MIB_MPCAPABLEPASSIVEFALLBACK),
+       SNMP_MIB_ITEM("MPCapableFallbackSYNACK", MPTCP_MIB_MPCAPABLEACTIVEFALLBACK),
++      SNMP_MIB_ITEM("MPCapableEndpAttempt", MPTCP_MIB_MPCAPABLEENDPATTEMPT),
+       SNMP_MIB_ITEM("MPFallbackTokenInit", MPTCP_MIB_TOKENFALLBACKINIT),
+       SNMP_MIB_ITEM("MPTCPRetrans", MPTCP_MIB_RETRANSSEGS),
+       SNMP_MIB_ITEM("MPJoinNoTokenFound", MPTCP_MIB_JOINNOTOKEN),
+diff --git a/net/mptcp/mib.h b/net/mptcp/mib.h
+index 3f4ca290be6904..ac574a790465a6 100644
+--- a/net/mptcp/mib.h
++++ b/net/mptcp/mib.h
+@@ -10,6 +10,7 @@ enum linux_mptcp_mib_field {
+       MPTCP_MIB_MPCAPABLEPASSIVEACK,  /* Received third ACK with MP_CAPABLE */
+       MPTCP_MIB_MPCAPABLEPASSIVEFALLBACK,/* Server-side fallback during 3-way handshake */
+       MPTCP_MIB_MPCAPABLEACTIVEFALLBACK, /* Client-side fallback during 3-way handshake */
++      MPTCP_MIB_MPCAPABLEENDPATTEMPT, /* Prohibited MPC to port-based endp */
+       MPTCP_MIB_TOKENFALLBACKINIT,    /* Could not init/allocate token */
+       MPTCP_MIB_RETRANSSEGS,          /* Segments retransmitted at the MPTCP-level */
+       MPTCP_MIB_JOINNOTOKEN,          /* Received MP_JOIN but the token was not found */
+diff --git a/net/mptcp/pm_netlink.c b/net/mptcp/pm_netlink.c
+index 6c586b2b377bbe..ba99484c501ed2 100644
+--- a/net/mptcp/pm_netlink.c
++++ b/net/mptcp/pm_netlink.c
+@@ -869,12 +869,12 @@ static void mptcp_pm_nl_rm_addr_or_subflow(struct mptcp_sock *msk,
+                                i, rm_id, id, remote_id, msk->mpc_endpoint_id);
+                       spin_unlock_bh(&msk->pm.lock);
+                       mptcp_subflow_shutdown(sk, ssk, how);
++                      removed |= subflow->request_join;
+ 
+                       /* the following takes care of updating the subflows counter */
+                       mptcp_close_ssk(sk, ssk, subflow);
+                       spin_lock_bh(&msk->pm.lock);
+ 
+-                      removed |= subflow->request_join;
+                       if (rm_type == MPTCP_MIB_RMSUBFLOW)
+                               __MPTCP_INC_STATS(sock_net(sk), rm_type);
+               }
+@@ -1117,6 +1117,7 @@ static int mptcp_pm_nl_create_listen_socket(struct sock *sk,
+        */
+       inet_sk_state_store(newsk, TCP_LISTEN);
+       lock_sock(ssk);
++      WRITE_ONCE(mptcp_subflow_ctx(ssk)->pm_listener, true);
+       err = __inet_listen_sk(ssk, backlog);
+       if (!err)
+               mptcp_event_pm_listener(ssk, MPTCP_EVENT_LISTENER_CREATED);
+diff --git a/net/mptcp/protocol.h b/net/mptcp/protocol.h
+index 3b22313d1b86f6..049af90589ae62 100644
+--- a/net/mptcp/protocol.h
++++ b/net/mptcp/protocol.h
+@@ -528,6 +528,7 @@ struct mptcp_subflow_context {
+               __unused : 9;
+       bool    data_avail;
+       bool    scheduled;
++      bool    pm_listener;        /* a listener managed by the kernel PM? */
+       u32     remote_nonce;
+       u64     thmac;
+       u32     local_nonce;
+diff --git a/net/mptcp/subflow.c b/net/mptcp/subflow.c
+index 7d14da95a28305..d4d1fddea44f3b 100644
+--- a/net/mptcp/subflow.c
++++ b/net/mptcp/subflow.c
+@@ -132,6 +132,13 @@ static void subflow_add_reset_reason(struct sk_buff *skb, u8 reason)
+       }
+ }
+ 
++static int subflow_reset_req_endp(struct request_sock *req, struct sk_buff *skb)
++{
++      SUBFLOW_REQ_INC_STATS(req, MPTCP_MIB_MPCAPABLEENDPATTEMPT);
++      subflow_add_reset_reason(skb, MPTCP_RST_EPROHIBIT);
++      return -EPERM;
++}
++
+ /* Init mptcp request socket.
+  *
+  * Returns an error code if a JOIN has failed and a TCP reset
+@@ -165,6 +172,8 @@ static int subflow_check_req(struct request_sock *req,
+       if (opt_mp_capable) {
+               SUBFLOW_REQ_INC_STATS(req, MPTCP_MIB_MPCAPABLEPASSIVE);
+ 
++              if (unlikely(listener->pm_listener))
++                      return subflow_reset_req_endp(req, skb);
+               if (opt_mp_join)
+                       return 0;
+       } else if (opt_mp_join) {
+@@ -172,6 +181,8 @@ static int subflow_check_req(struct request_sock *req,
+ 
+               if (mp_opt.backup)
+                       SUBFLOW_REQ_INC_STATS(req, MPTCP_MIB_JOINSYNBACKUPRX);
++      } else if (unlikely(listener->pm_listener)) {
++              return subflow_reset_req_endp(req, skb);
+       }
+ 
+       if (opt_mp_capable && listener->request_mptcp) {
+diff --git a/sound/pci/hda/patch_conexant.c b/sound/pci/hda/patch_conexant.c
+index 4a2c8274c3df7e..843cc1ed75c3e5 100644
+--- a/sound/pci/hda/patch_conexant.c
++++ b/sound/pci/hda/patch_conexant.c
+@@ -303,6 +303,7 @@ enum {
+       CXT_FIXUP_HP_SPECTRE,
+       CXT_FIXUP_HP_GATE_MIC,
+       CXT_FIXUP_MUTE_LED_GPIO,
++      CXT_FIXUP_HP_ELITEONE_OUT_DIS,
+       CXT_FIXUP_HP_ZBOOK_MUTE_LED,
+       CXT_FIXUP_HEADSET_MIC,
+       CXT_FIXUP_HP_MIC_NO_PRESENCE,
+@@ -320,6 +321,19 @@ static void cxt_fixup_stereo_dmic(struct hda_codec *codec,
+       spec->gen.inv_dmic_split = 1;
+ }
+ 
++/* fix widget control pin settings */
++static void cxt_fixup_update_pinctl(struct hda_codec *codec,
++                                 const struct hda_fixup *fix, int action)
++{
++      if (action == HDA_FIXUP_ACT_PROBE) {
++              /* Unset OUT_EN for this Node pin, leaving only HP_EN.
++               * This is the value stored in the codec register after
++               * the correct initialization of the previous windows boot.
++               */
++              snd_hda_set_pin_ctl_cache(codec, 0x1d, AC_PINCTL_HP_EN);
++      }
++}
++
+ static void cxt5066_increase_mic_boost(struct hda_codec *codec,
+                                  const struct hda_fixup *fix, int action)
+ {
+@@ -971,6 +985,10 @@ static const struct hda_fixup cxt_fixups[] = {
+               .type = HDA_FIXUP_FUNC,
+               .v.func = cxt_fixup_mute_led_gpio,
+       },
++      [CXT_FIXUP_HP_ELITEONE_OUT_DIS] = {
++              .type = HDA_FIXUP_FUNC,
++              .v.func = cxt_fixup_update_pinctl,
++      },
+       [CXT_FIXUP_HP_ZBOOK_MUTE_LED] = {
+               .type = HDA_FIXUP_FUNC,
+               .v.func = cxt_fixup_hp_zbook_mute_led,
+@@ -1061,6 +1079,7 @@ static const struct snd_pci_quirk cxt5066_fixups[] = {
+       SND_PCI_QUIRK(0x103c, 0x83b2, "HP EliteBook 840 G5", CXT_FIXUP_HP_DOCK),
+       SND_PCI_QUIRK(0x103c, 0x83b3, "HP EliteBook 830 G5", CXT_FIXUP_HP_DOCK),
+       SND_PCI_QUIRK(0x103c, 0x83d3, "HP ProBook 640 G4", CXT_FIXUP_HP_DOCK),
++      SND_PCI_QUIRK(0x103c, 0x83e5, "HP EliteOne 1000 G2", CXT_FIXUP_HP_ELITEONE_OUT_DIS),
+       SND_PCI_QUIRK(0x103c, 0x8402, "HP ProBook 645 G4", CXT_FIXUP_MUTE_LED_GPIO),
+       SND_PCI_QUIRK(0x103c, 0x8427, "HP ZBook Studio G5", CXT_FIXUP_HP_ZBOOK_MUTE_LED),
+       SND_PCI_QUIRK(0x103c, 0x844f, "HP ZBook Studio G5", CXT_FIXUP_HP_ZBOOK_MUTE_LED),
+diff --git a/sound/usb/mixer_scarlett2.c b/sound/usb/mixer_scarlett2.c
+index 1150cf104985ce..4cddf84db631c6 100644
+--- a/sound/usb/mixer_scarlett2.c
++++ b/sound/usb/mixer_scarlett2.c
+@@ -5613,6 +5613,8 @@ static int scarlett2_update_filter_values(struct usb_mixer_interface *mixer)
+                       info->peq_flt_total_count *
+                       SCARLETT2_BIQUAD_COEFFS,
+               peq_flt_values);
++      if (err < 0)
++              return err;
+ 
+       for (i = 0, dst_idx = 0; i < info->dsp_input_count; i++) {
+               src_idx = i *
+diff --git a/tools/testing/selftests/hid/Makefile b/tools/testing/selftests/hid/Makefile
+index 346328e2295c30..748e0c79a27dad 100644
+--- a/tools/testing/selftests/hid/Makefile
++++ b/tools/testing/selftests/hid/Makefile
+@@ -18,6 +18,7 @@ TEST_PROGS += hid-usb_crash.sh
+ TEST_PROGS += hid-wacom.sh
+ 
+ TEST_FILES := run-hid-tools-tests.sh
++TEST_FILES += tests
+ 
+ CXX ?= $(CROSS_COMPILE)g++
+ 
+diff --git a/tools/testing/selftests/mm/uffd-common.c b/tools/testing/selftests/mm/uffd-common.c
+index 717539eddf9875..852e7281026ee4 100644
+--- a/tools/testing/selftests/mm/uffd-common.c
++++ b/tools/testing/selftests/mm/uffd-common.c
+@@ -18,7 +18,7 @@ bool test_uffdio_wp = true;
+ unsigned long long *count_verify;
+ uffd_test_ops_t *uffd_test_ops;
+ uffd_test_case_ops_t *uffd_test_case_ops;
+-atomic_bool ready_for_fork;
++pthread_barrier_t ready_for_fork;
+ 
+ static int uffd_mem_fd_create(off_t mem_size, bool hugetlb)
+ {
+@@ -519,7 +519,8 @@ void *uffd_poll_thread(void *arg)
+       pollfd[1].fd = pipefd[cpu*2];
+       pollfd[1].events = POLLIN;
+ 
+-      ready_for_fork = true;
++      /* Ready for parent thread to fork */
++      pthread_barrier_wait(&ready_for_fork);
+ 
+       for (;;) {
+               ret = poll(pollfd, 2, -1);
+diff --git a/tools/testing/selftests/mm/uffd-common.h b/tools/testing/selftests/mm/uffd-common.h
+index a70ae10b5f6206..3e6228d8e0dcc7 100644
+--- a/tools/testing/selftests/mm/uffd-common.h
++++ b/tools/testing/selftests/mm/uffd-common.h
+@@ -33,7 +33,6 @@
+ #include <inttypes.h>
+ #include <stdint.h>
+ #include <sys/random.h>
+-#include <stdatomic.h>
+ 
+ #include "../kselftest.h"
+ #include "vm_util.h"
+@@ -105,7 +104,7 @@ extern bool map_shared;
+ extern bool test_uffdio_wp;
+ extern unsigned long long *count_verify;
+ extern volatile bool test_uffdio_copy_eexist;
+-extern atomic_bool ready_for_fork;
++extern pthread_barrier_t ready_for_fork;
+ 
+ extern uffd_test_ops_t anon_uffd_test_ops;
+ extern uffd_test_ops_t shmem_uffd_test_ops;
+diff --git a/tools/testing/selftests/mm/uffd-unit-tests.c b/tools/testing/selftests/mm/uffd-unit-tests.c
+index b3d21eed203dc2..c8a3b1c7edffbd 100644
+--- a/tools/testing/selftests/mm/uffd-unit-tests.c
++++ b/tools/testing/selftests/mm/uffd-unit-tests.c
+@@ -241,6 +241,9 @@ static void *fork_event_consumer(void *data)
+       fork_event_args *args = data;
+       struct uffd_msg msg = { 0 };
+ 
++      /* Ready for parent thread to fork */
++      pthread_barrier_wait(&ready_for_fork);
++
+       /* Read until a full msg received */
+       while (uffd_read_msg(args->parent_uffd, &msg));
+ 
+@@ -308,8 +311,12 @@ static int pagemap_test_fork(int uffd, bool with_event, bool test_pin)
+ 
+       /* Prepare a thread to resolve EVENT_FORK */
+       if (with_event) {
++              pthread_barrier_init(&ready_for_fork, NULL, 2);
+               if (pthread_create(&thread, NULL, fork_event_consumer, &args))
+                       err("pthread_create()");
++              /* Wait for child thread to start before forking */
++              pthread_barrier_wait(&ready_for_fork);
++              pthread_barrier_destroy(&ready_for_fork);
+       }
+ 
+       child = fork();
+@@ -774,7 +781,7 @@ static void uffd_sigbus_test_common(bool wp)
+       char c;
+       struct uffd_args args = { 0 };
+ 
+-      ready_for_fork = false;
++      pthread_barrier_init(&ready_for_fork, NULL, 2);
+ 
+       fcntl(uffd, F_SETFL, uffd_flags | O_NONBLOCK);
+ 
+@@ -791,8 +798,9 @@ static void uffd_sigbus_test_common(bool wp)
+       if (pthread_create(&uffd_mon, NULL, uffd_poll_thread, &args))
+               err("uffd_poll_thread create");
+ 
+-      while (!ready_for_fork)
+-              ; /* Wait for the poll_thread to start executing before forking */
++      /* Wait for child thread to start before forking */
++      pthread_barrier_wait(&ready_for_fork);
++      pthread_barrier_destroy(&ready_for_fork);
+ 
+       pid = fork();
+       if (pid < 0)
+@@ -833,7 +841,7 @@ static void uffd_events_test_common(bool wp)
+       char c;
+       struct uffd_args args = { 0 };
+ 
+-      ready_for_fork = false;
++      pthread_barrier_init(&ready_for_fork, NULL, 2);
+ 
+       fcntl(uffd, F_SETFL, uffd_flags | O_NONBLOCK);
+       if (uffd_register(uffd, area_dst, nr_pages * page_size,
+@@ -844,8 +852,9 @@ static void uffd_events_test_common(bool wp)
+       if (pthread_create(&uffd_mon, NULL, uffd_poll_thread, &args))
+               err("uffd_poll_thread create");
+ 
+-      while (!ready_for_fork)
+-              ; /* Wait for the poll_thread to start executing before forking */
++      /* Wait for child thread to start before forking */
++      pthread_barrier_wait(&ready_for_fork);
++      pthread_barrier_destroy(&ready_for_fork);
+ 
+       pid = fork();
+       if (pid < 0)
+diff --git a/tools/testing/selftests/net/mptcp/mptcp_join.sh b/tools/testing/selftests/net/mptcp/mptcp_join.sh
+index cde041c93906df..05859bb387ffc5 100755
+--- a/tools/testing/selftests/net/mptcp/mptcp_join.sh
++++ b/tools/testing/selftests/net/mptcp/mptcp_join.sh
+@@ -23,6 +23,7 @@ tmpfile=""
+ cout=""
+ err=""
+ capout=""
++cappid=""
+ ns1=""
+ ns2=""
+ iptables="iptables"
+@@ -861,40 +862,62 @@ check_cestab()
+       fi
+ }
+ 
+-do_transfer()
++cond_start_capture()
+ {
+-      local listener_ns="$1"
+-      local connector_ns="$2"
+-      local cl_proto="$3"
+-      local srv_proto="$4"
+-      local connect_addr="$5"
+-
+-      local port=$((10000 + MPTCP_LIB_TEST_COUNTER - 1))
+-      local cappid
+-      local FAILING_LINKS=${FAILING_LINKS:-""}
+-      local fastclose=${fastclose:-""}
+-      local speed=${speed:-"fast"}
++      local ns="$1"
+ 
+-      :> "$cout"
+-      :> "$sout"
+       :> "$capout"
+ 
+       if $capture; then
+-              local capuser
+-              if [ -z $SUDO_USER ] ; then
++              local capuser capfile
++              if [ -z $SUDO_USER ]; then
+                       capuser=""
+               else
+                       capuser="-Z $SUDO_USER"
+               fi
+ 
+-              capfile=$(printf "mp_join-%02u-%s.pcap" "$MPTCP_LIB_TEST_COUNTER" "${listener_ns}")
++              capfile=$(printf "mp_join-%02u-%s.pcap" "$MPTCP_LIB_TEST_COUNTER" "$ns")
+ 
+               echo "Capturing traffic for test $MPTCP_LIB_TEST_COUNTER into $capfile"
+-              ip netns exec ${listener_ns} tcpdump -i any -s 65535 -B 32768 $capuser -w $capfile > "$capout" 2>&1 &
++              ip netns exec "$ns" tcpdump -i any -s 65535 -B 32768 $capuser -w "$capfile" > "$capout" 2>&1 &
+               cappid=$!
+ 
+               sleep 1
+       fi
++}
++
++cond_stop_capture()
++{
++      if $capture; then
++              sleep 1
++              kill $cappid
++              cat "$capout"
++      fi
++}
++
++get_port()
++{
++      echo "$((10000 + MPTCP_LIB_TEST_COUNTER - 1))"
++}
++
++do_transfer()
++{
++      local listener_ns="$1"
++      local connector_ns="$2"
++      local cl_proto="$3"
++      local srv_proto="$4"
++      local connect_addr="$5"
++      local port
++
++      local FAILING_LINKS=${FAILING_LINKS:-""}
++      local fastclose=${fastclose:-""}
++      local speed=${speed:-"fast"}
++      port=$(get_port)
++
++      :> "$cout"
++      :> "$sout"
++
++      cond_start_capture ${listener_ns}
+ 
+       NSTAT_HISTORY=/tmp/${listener_ns}.nstat ip netns exec ${listener_ns} \
+               nstat -n
+@@ -981,10 +1004,7 @@ do_transfer()
+       wait $spid
+       local rets=$?
+ 
+-      if $capture; then
+-          sleep 1
+-          kill $cappid
+-      fi
++      cond_stop_capture
+ 
+       NSTAT_HISTORY=/tmp/${listener_ns}.nstat ip netns exec ${listener_ns} \
+               nstat | grep Tcp > /tmp/${listener_ns}.out
+@@ -1000,7 +1020,6 @@ do_transfer()
+               ip netns exec ${connector_ns} ss -Menita 1>&2 -o "dport = :$port"
+               cat /tmp/${connector_ns}.out
+ 
+-              cat "$capout"
+               return 1
+       fi
+ 
+@@ -1017,13 +1036,7 @@ do_transfer()
+       fi
+       rets=$?
+ 
+-      if [ $retc -eq 0 ] && [ $rets -eq 0 ];then
+-              cat "$capout"
+-              return 0
+-      fi
+-
+-      cat "$capout"
+-      return 1
++      [ $retc -eq 0 ] && [ $rets -eq 0 ]
+ }
+ 
+ make_file()
+@@ -2786,6 +2799,32 @@ verify_listener_events()
+       fail_test
+ }
+ 
++chk_mpc_endp_attempt()
++{
++      local retl=$1
++      local attempts=$2
++
++      print_check "Connect"
++
++      if [ ${retl} = 124 ]; then
++              fail_test "timeout on connect"
++      elif [ ${retl} = 0 ]; then
++              fail_test "unexpected successful connect"
++      else
++              print_ok
++
++              print_check "Attempts"
++              count=$(mptcp_lib_get_counter ${ns1} "MPTcpExtMPCapableEndpAttempt")
++              if [ -z "$count" ]; then
++                      print_skip
++              elif [ "$count" != "$attempts" ]; then
++                      fail_test "got ${count} MPC attempt[s] on port-based endpoint, expected ${attempts}"
++              else
++                      print_ok
++              fi
++      fi
++}
++
+ add_addr_ports_tests()
+ {
+       # signal address with port
+@@ -2876,6 +2915,22 @@ add_addr_ports_tests()
+               chk_join_nr 2 2 2
+               chk_add_nr 2 2 2
+       fi
++
++      if reset "port-based signal endpoint must not accept mpc"; then
++              local port retl count
++              port=$(get_port)
++
++              cond_start_capture ${ns1}
++              pm_nl_add_endpoint ${ns1} 10.0.2.1 flags signal port ${port}
++              mptcp_lib_wait_local_port_listen ${ns1} ${port}
++
++              timeout 1 ip netns exec ${ns2} \
++                      ./mptcp_connect -t ${timeout_poll} -p $port -s MPTCP 10.0.2.1 >/dev/null 2>&1
++              retl=$?
++              cond_stop_capture
++
++              chk_mpc_endp_attempt ${retl} 1
++      fi
+ }
+ 
+ syncookies_tests()

