commit:     c53b80c8793617ab6bc636c77c557a0bbaf2c35a
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Thu Jan 20 09:59:59 2022 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Thu Jan 20 09:59:59 2022 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=c53b80c8
Linux patch 5.10.93

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

 0000_README | 4 +
 1092_linux-5.10.93.patch | 997 +++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 1001 insertions(+)

diff --git a/0000_README b/0000_README
index 6d8b55d4..ababebf8 100644
--- a/0000_README
+++ b/0000_README
@@ -411,6 +411,10 @@ Patch: 1091_linux-5.10.92.patch
 From: http://www.kernel.org
 Desc: Linux 5.10.92
 
+Patch: 1092_linux-5.10.93.patch
+From: http://www.kernel.org
+Desc: Linux 5.10.93
+
 Patch: 1500_XATTR_USER_PREFIX.patch
 From: https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc: Support for namespace user.pax.* on tmpfs.
diff --git a/1092_linux-5.10.93.patch b/1092_linux-5.10.93.patch
new file mode 100644
index 00000000..52fb82c6
--- /dev/null
+++ b/1092_linux-5.10.93.patch
@@ -0,0 +1,997 @@
+diff --git a/Makefile b/Makefile
+index a113a29545bdb..993559750df9d 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 5
+ PATCHLEVEL = 10
+-SUBLEVEL = 92
++SUBLEVEL = 93
+ EXTRAVERSION =
+ NAME = Dare mighty things
+ 
+@@ -1073,7 +1073,7 @@ export mod_sign_cmd
+ HOST_LIBELF_LIBS = $(shell pkg-config libelf --libs 2>/dev/null || echo -lelf)
+ 
+ has_libelf = $(call try-run,\
+- echo "int main() {}" | $(HOSTCC) -xc -o /dev/null $(HOST_LIBELF_LIBS) -,1,0)
++ echo "int main() {}" | $(HOSTCC) $(KBUILD_HOSTLDFLAGS) -xc -o /dev/null $(HOST_LIBELF_LIBS) -,1,0)
+ 
+ ifdef CONFIG_STACK_VALIDATION
+ ifeq ($(has_libelf),1)
+diff --git a/arch/arm/kernel/perf_callchain.c b/arch/arm/kernel/perf_callchain.c
+index 3b69a76d341e7..1626dfc6f6ce6 100644
+--- a/arch/arm/kernel/perf_callchain.c
++++ b/arch/arm/kernel/perf_callchain.c
+@@ -62,9 +62,10 @@ user_backtrace(struct frame_tail __user *tail,
+ void
+ perf_callchain_user(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs)
+ {
++ struct perf_guest_info_callbacks *guest_cbs = perf_get_guest_cbs();
+ struct frame_tail __user *tail;
+ 
+- if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) {
++ if (guest_cbs && guest_cbs->is_in_guest()) {
+ /* We don't support guest os callchain now */
+ return;
+ }
+@@ -98,9 +99,10 @@ callchain_trace(struct stackframe *fr,
+ void
+ perf_callchain_kernel(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs)
+ {
++ struct perf_guest_info_callbacks *guest_cbs = perf_get_guest_cbs();
+ struct stackframe fr;
+ 
+- if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) {
++ if (guest_cbs && guest_cbs->is_in_guest()) {
+ /* We don't support guest os callchain now */
+ return;
+ }
+@@ -111,18 +113,21 @@ perf_callchain_kernel(struct perf_callchain_entry_ctx *entry, struct pt_regs *re
+ 
+ unsigned long perf_instruction_pointer(struct pt_regs *regs)
+ {
+- if (perf_guest_cbs && perf_guest_cbs->is_in_guest())
+- return perf_guest_cbs->get_guest_ip();
++ struct perf_guest_info_callbacks *guest_cbs = perf_get_guest_cbs();
++
++ if (guest_cbs && guest_cbs->is_in_guest())
++ return guest_cbs->get_guest_ip();
+ 
+ return instruction_pointer(regs);
+ }
+ 
+ unsigned long perf_misc_flags(struct pt_regs *regs)
+ {
++ struct perf_guest_info_callbacks *guest_cbs = perf_get_guest_cbs();
+ int misc = 0;
+ 
+- if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) {
+- if (perf_guest_cbs->is_user_mode())
++ if (guest_cbs && guest_cbs->is_in_guest()) {
++ if (guest_cbs->is_user_mode())
+ misc |= PERF_RECORD_MISC_GUEST_USER;
+ else
+ misc |= PERF_RECORD_MISC_GUEST_KERNEL;
+diff --git a/arch/arm64/kernel/perf_callchain.c b/arch/arm64/kernel/perf_callchain.c
+index 88ff471b0bce5..58ae55d78a203
100644 +--- a/arch/arm64/kernel/perf_callchain.c ++++ b/arch/arm64/kernel/perf_callchain.c +@@ -102,7 +102,9 @@ compat_user_backtrace(struct compat_frame_tail __user *tail, + void perf_callchain_user(struct perf_callchain_entry_ctx *entry, + struct pt_regs *regs) + { +- if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) { ++ struct perf_guest_info_callbacks *guest_cbs = perf_get_guest_cbs(); ++ ++ if (guest_cbs && guest_cbs->is_in_guest()) { + /* We don't support guest os callchain now */ + return; + } +@@ -147,9 +149,10 @@ static bool callchain_trace(void *data, unsigned long pc) + void perf_callchain_kernel(struct perf_callchain_entry_ctx *entry, + struct pt_regs *regs) + { ++ struct perf_guest_info_callbacks *guest_cbs = perf_get_guest_cbs(); + struct stackframe frame; + +- if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) { ++ if (guest_cbs && guest_cbs->is_in_guest()) { + /* We don't support guest os callchain now */ + return; + } +@@ -160,18 +163,21 @@ void perf_callchain_kernel(struct perf_callchain_entry_ctx *entry, + + unsigned long perf_instruction_pointer(struct pt_regs *regs) + { +- if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) +- return perf_guest_cbs->get_guest_ip(); ++ struct perf_guest_info_callbacks *guest_cbs = perf_get_guest_cbs(); ++ ++ if (guest_cbs && guest_cbs->is_in_guest()) ++ return guest_cbs->get_guest_ip(); + + return instruction_pointer(regs); + } + + unsigned long perf_misc_flags(struct pt_regs *regs) + { ++ struct perf_guest_info_callbacks *guest_cbs = perf_get_guest_cbs(); + int misc = 0; + +- if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) { +- if (perf_guest_cbs->is_user_mode()) ++ if (guest_cbs && guest_cbs->is_in_guest()) { ++ if (guest_cbs->is_user_mode()) + misc |= PERF_RECORD_MISC_GUEST_USER; + else + misc |= PERF_RECORD_MISC_GUEST_KERNEL; +diff --git a/arch/csky/kernel/perf_callchain.c b/arch/csky/kernel/perf_callchain.c +index ab55e98ee8f62..35318a635a5fa 100644 +--- a/arch/csky/kernel/perf_callchain.c ++++ b/arch/csky/kernel/perf_callchain.c +@@ -86,10 +86,11 @@ static unsigned long user_backtrace(struct perf_callchain_entry_ctx *entry, + void perf_callchain_user(struct perf_callchain_entry_ctx *entry, + struct pt_regs *regs) + { ++ struct perf_guest_info_callbacks *guest_cbs = perf_get_guest_cbs(); + unsigned long fp = 0; + + /* C-SKY does not support virtualization. */ +- if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) ++ if (guest_cbs && guest_cbs->is_in_guest()) + return; + + fp = regs->regs[4]; +@@ -110,10 +111,11 @@ void perf_callchain_user(struct perf_callchain_entry_ctx *entry, + void perf_callchain_kernel(struct perf_callchain_entry_ctx *entry, + struct pt_regs *regs) + { ++ struct perf_guest_info_callbacks *guest_cbs = perf_get_guest_cbs(); + struct stackframe fr; + + /* C-SKY does not support virtualization. 
*/ +- if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) { ++ if (guest_cbs && guest_cbs->is_in_guest()) { + pr_warn("C-SKY does not support perf in guest mode!"); + return; + } +diff --git a/arch/nds32/kernel/perf_event_cpu.c b/arch/nds32/kernel/perf_event_cpu.c +index 0ce6f9f307e6a..f387919607813 100644 +--- a/arch/nds32/kernel/perf_event_cpu.c ++++ b/arch/nds32/kernel/perf_event_cpu.c +@@ -1363,6 +1363,7 @@ void + perf_callchain_user(struct perf_callchain_entry_ctx *entry, + struct pt_regs *regs) + { ++ struct perf_guest_info_callbacks *guest_cbs = perf_get_guest_cbs(); + unsigned long fp = 0; + unsigned long gp = 0; + unsigned long lp = 0; +@@ -1371,7 +1372,7 @@ perf_callchain_user(struct perf_callchain_entry_ctx *entry, + + leaf_fp = 0; + +- if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) { ++ if (guest_cbs && guest_cbs->is_in_guest()) { + /* We don't support guest os callchain now */ + return; + } +@@ -1479,9 +1480,10 @@ void + perf_callchain_kernel(struct perf_callchain_entry_ctx *entry, + struct pt_regs *regs) + { ++ struct perf_guest_info_callbacks *guest_cbs = perf_get_guest_cbs(); + struct stackframe fr; + +- if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) { ++ if (guest_cbs && guest_cbs->is_in_guest()) { + /* We don't support guest os callchain now */ + return; + } +@@ -1493,20 +1495,23 @@ perf_callchain_kernel(struct perf_callchain_entry_ctx *entry, + + unsigned long perf_instruction_pointer(struct pt_regs *regs) + { ++ struct perf_guest_info_callbacks *guest_cbs = perf_get_guest_cbs(); ++ + /* However, NDS32 does not support virtualization */ +- if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) +- return perf_guest_cbs->get_guest_ip(); ++ if (guest_cbs && guest_cbs->is_in_guest()) ++ return guest_cbs->get_guest_ip(); + + return instruction_pointer(regs); + } + + unsigned long perf_misc_flags(struct pt_regs *regs) + { ++ struct perf_guest_info_callbacks *guest_cbs = perf_get_guest_cbs(); + int misc = 0; + + /* However, NDS32 does not support virtualization */ +- if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) { +- if (perf_guest_cbs->is_user_mode()) ++ if (guest_cbs && guest_cbs->is_in_guest()) { ++ if (guest_cbs->is_user_mode()) + misc |= PERF_RECORD_MISC_GUEST_USER; + else + misc |= PERF_RECORD_MISC_GUEST_KERNEL; +diff --git a/arch/powerpc/include/asm/hvcall.h b/arch/powerpc/include/asm/hvcall.h +index 3e8e19f5746c7..00c8cda1c9c31 100644 +--- a/arch/powerpc/include/asm/hvcall.h ++++ b/arch/powerpc/include/asm/hvcall.h +@@ -382,6 +382,8 @@ + #define H_CPU_BEHAV_BNDS_CHK_SPEC_BAR (1ull << 61) // IBM bit 2 + #define H_CPU_BEHAV_FLUSH_COUNT_CACHE (1ull << 58) // IBM bit 5 + #define H_CPU_BEHAV_FLUSH_LINK_STACK (1ull << 57) // IBM bit 6 ++#define H_CPU_BEHAV_NO_L1D_FLUSH_ENTRY (1ull << 56) // IBM bit 7 ++#define H_CPU_BEHAV_NO_L1D_FLUSH_UACCESS (1ull << 55) // IBM bit 8 + + /* Flag values used in H_REGISTER_PROC_TBL hcall */ + #define PROC_TABLE_OP_MASK 0x18 +diff --git a/arch/powerpc/platforms/pseries/setup.c b/arch/powerpc/platforms/pseries/setup.c +index 5f0d446a2325e..47dfada140e19 100644 +--- a/arch/powerpc/platforms/pseries/setup.c ++++ b/arch/powerpc/platforms/pseries/setup.c +@@ -538,6 +538,12 @@ static void init_cpu_char_feature_flags(struct h_cpu_char_result *result) + if (!(result->behaviour & H_CPU_BEHAV_L1D_FLUSH_PR)) + security_ftr_clear(SEC_FTR_L1D_FLUSH_PR); + ++ if (result->behaviour & H_CPU_BEHAV_NO_L1D_FLUSH_ENTRY) ++ security_ftr_clear(SEC_FTR_L1D_FLUSH_ENTRY); ++ ++ if (result->behaviour & H_CPU_BEHAV_NO_L1D_FLUSH_UACCESS) ++ 
security_ftr_clear(SEC_FTR_L1D_FLUSH_UACCESS); ++ + if (!(result->behaviour & H_CPU_BEHAV_BNDS_CHK_SPEC_BAR)) + security_ftr_clear(SEC_FTR_BNDS_CHK_SPEC_BAR); + } +diff --git a/arch/riscv/kernel/perf_callchain.c b/arch/riscv/kernel/perf_callchain.c +index cf190197a22f6..ad3001cbdf618 100644 +--- a/arch/riscv/kernel/perf_callchain.c ++++ b/arch/riscv/kernel/perf_callchain.c +@@ -60,10 +60,11 @@ static unsigned long user_backtrace(struct perf_callchain_entry_ctx *entry, + void perf_callchain_user(struct perf_callchain_entry_ctx *entry, + struct pt_regs *regs) + { ++ struct perf_guest_info_callbacks *guest_cbs = perf_get_guest_cbs(); + unsigned long fp = 0; + + /* RISC-V does not support perf in guest mode. */ +- if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) ++ if (guest_cbs && guest_cbs->is_in_guest()) + return; + + fp = regs->s0; +@@ -84,8 +85,10 @@ void notrace walk_stackframe(struct task_struct *task, + void perf_callchain_kernel(struct perf_callchain_entry_ctx *entry, + struct pt_regs *regs) + { ++ struct perf_guest_info_callbacks *guest_cbs = perf_get_guest_cbs(); ++ + /* RISC-V does not support perf in guest mode. */ +- if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) { ++ if (guest_cbs && guest_cbs->is_in_guest()) { + pr_warn("RISC-V does not support perf in guest mode!"); + return; + } +diff --git a/arch/s390/kvm/interrupt.c b/arch/s390/kvm/interrupt.c +index e6c4f29fc6956..b51ab19eb9721 100644 +--- a/arch/s390/kvm/interrupt.c ++++ b/arch/s390/kvm/interrupt.c +@@ -2115,6 +2115,13 @@ int kvm_s390_is_stop_irq_pending(struct kvm_vcpu *vcpu) + return test_bit(IRQ_PEND_SIGP_STOP, &li->pending_irqs); + } + ++int kvm_s390_is_restart_irq_pending(struct kvm_vcpu *vcpu) ++{ ++ struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int; ++ ++ return test_bit(IRQ_PEND_RESTART, &li->pending_irqs); ++} ++ + void kvm_s390_clear_stop_irq(struct kvm_vcpu *vcpu) + { + struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int; +diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c +index 00f03f363c9b0..07a04f3926009 100644 +--- a/arch/s390/kvm/kvm-s390.c ++++ b/arch/s390/kvm/kvm-s390.c +@@ -4588,10 +4588,15 @@ int kvm_s390_vcpu_stop(struct kvm_vcpu *vcpu) + } + } + +- /* SIGP STOP and SIGP STOP AND STORE STATUS has been fully processed */ ++ /* ++ * Set the VCPU to STOPPED and THEN clear the interrupt flag, ++ * now that the SIGP STOP and SIGP STOP AND STORE STATUS orders ++ * have been fully processed. This will ensure that the VCPU ++ * is kept BUSY if another VCPU is inquiring with SIGP SENSE. 
++ */ ++ kvm_s390_set_cpuflags(vcpu, CPUSTAT_STOPPED); + kvm_s390_clear_stop_irq(vcpu); + +- kvm_s390_set_cpuflags(vcpu, CPUSTAT_STOPPED); + __disable_ibs_on_vcpu(vcpu); + + for (i = 0; i < online_vcpus; i++) { +diff --git a/arch/s390/kvm/kvm-s390.h b/arch/s390/kvm/kvm-s390.h +index 2d134833bca69..a3e9b71d426f9 100644 +--- a/arch/s390/kvm/kvm-s390.h ++++ b/arch/s390/kvm/kvm-s390.h +@@ -418,6 +418,7 @@ void kvm_s390_destroy_adapters(struct kvm *kvm); + int kvm_s390_ext_call_pending(struct kvm_vcpu *vcpu); + extern struct kvm_device_ops kvm_flic_ops; + int kvm_s390_is_stop_irq_pending(struct kvm_vcpu *vcpu); ++int kvm_s390_is_restart_irq_pending(struct kvm_vcpu *vcpu); + void kvm_s390_clear_stop_irq(struct kvm_vcpu *vcpu); + int kvm_s390_set_irq_state(struct kvm_vcpu *vcpu, + void __user *buf, int len); +diff --git a/arch/s390/kvm/sigp.c b/arch/s390/kvm/sigp.c +index 683036c1c92a8..3dc921e853b6e 100644 +--- a/arch/s390/kvm/sigp.c ++++ b/arch/s390/kvm/sigp.c +@@ -288,6 +288,34 @@ static int handle_sigp_dst(struct kvm_vcpu *vcpu, u8 order_code, + if (!dst_vcpu) + return SIGP_CC_NOT_OPERATIONAL; + ++ /* ++ * SIGP RESTART, SIGP STOP, and SIGP STOP AND STORE STATUS orders ++ * are processed asynchronously. Until the affected VCPU finishes ++ * its work and calls back into KVM to clear the (RESTART or STOP) ++ * interrupt, we need to return any new non-reset orders "busy". ++ * ++ * This is important because a single VCPU could issue: ++ * 1) SIGP STOP $DESTINATION ++ * 2) SIGP SENSE $DESTINATION ++ * ++ * If the SIGP SENSE would not be rejected as "busy", it could ++ * return an incorrect answer as to whether the VCPU is STOPPED ++ * or OPERATING. ++ */ ++ if (order_code != SIGP_INITIAL_CPU_RESET && ++ order_code != SIGP_CPU_RESET) { ++ /* ++ * Lockless check. Both SIGP STOP and SIGP (RE)START ++ * properly synchronize everything while processing ++ * their orders, while the guest cannot observe a ++ * difference when issuing other orders from two ++ * different VCPUs. 
++ */ ++ if (kvm_s390_is_stop_irq_pending(dst_vcpu) || ++ kvm_s390_is_restart_irq_pending(dst_vcpu)) ++ return SIGP_CC_BUSY; ++ } ++ + switch (order_code) { + case SIGP_SENSE: + vcpu->stat.instruction_sigp_sense++; +diff --git a/arch/x86/events/core.c b/arch/x86/events/core.c +index 1f5d96ba4866d..b79b9f21cbb3b 100644 +--- a/arch/x86/events/core.c ++++ b/arch/x86/events/core.c +@@ -2545,10 +2545,11 @@ static bool perf_hw_regs(struct pt_regs *regs) + void + perf_callchain_kernel(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs) + { ++ struct perf_guest_info_callbacks *guest_cbs = perf_get_guest_cbs(); + struct unwind_state state; + unsigned long addr; + +- if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) { ++ if (guest_cbs && guest_cbs->is_in_guest()) { + /* TODO: We don't support guest os callchain now */ + return; + } +@@ -2648,10 +2649,11 @@ perf_callchain_user32(struct pt_regs *regs, struct perf_callchain_entry_ctx *ent + void + perf_callchain_user(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs) + { ++ struct perf_guest_info_callbacks *guest_cbs = perf_get_guest_cbs(); + struct stack_frame frame; + const struct stack_frame __user *fp; + +- if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) { ++ if (guest_cbs && guest_cbs->is_in_guest()) { + /* TODO: We don't support guest os callchain now */ + return; + } +@@ -2728,18 +2730,21 @@ static unsigned long code_segment_base(struct pt_regs *regs) + + unsigned long perf_instruction_pointer(struct pt_regs *regs) + { +- if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) +- return perf_guest_cbs->get_guest_ip(); ++ struct perf_guest_info_callbacks *guest_cbs = perf_get_guest_cbs(); ++ ++ if (guest_cbs && guest_cbs->is_in_guest()) ++ return guest_cbs->get_guest_ip(); + + return regs->ip + code_segment_base(regs); + } + + unsigned long perf_misc_flags(struct pt_regs *regs) + { ++ struct perf_guest_info_callbacks *guest_cbs = perf_get_guest_cbs(); + int misc = 0; + +- if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) { +- if (perf_guest_cbs->is_user_mode()) ++ if (guest_cbs && guest_cbs->is_in_guest()) { ++ if (guest_cbs->is_user_mode()) + misc |= PERF_RECORD_MISC_GUEST_USER; + else + misc |= PERF_RECORD_MISC_GUEST_KERNEL; +diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c +index a521135247eb6..6525693e7aeaa 100644 +--- a/arch/x86/events/intel/core.c ++++ b/arch/x86/events/intel/core.c +@@ -2586,6 +2586,7 @@ static int handle_pmi_common(struct pt_regs *regs, u64 status) + { + struct perf_sample_data data; + struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); ++ struct perf_guest_info_callbacks *guest_cbs; + int bit; + int handled = 0; + +@@ -2651,9 +2652,11 @@ static int handle_pmi_common(struct pt_regs *regs, u64 status) + */ + if (__test_and_clear_bit(GLOBAL_STATUS_TRACE_TOPAPMI_BIT, (unsigned long *)&status)) { + handled++; +- if (unlikely(perf_guest_cbs && perf_guest_cbs->is_in_guest() && +- perf_guest_cbs->handle_intel_pt_intr)) +- perf_guest_cbs->handle_intel_pt_intr(); ++ ++ guest_cbs = perf_get_guest_cbs(); ++ if (unlikely(guest_cbs && guest_cbs->is_in_guest() && ++ guest_cbs->handle_intel_pt_intr)) ++ guest_cbs->handle_intel_pt_intr(); + else + intel_pt_interrupt(); + } +diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h +index e8fb4b0394af2..13e10b970ac83 100644 +--- a/arch/x86/include/asm/kvm_host.h ++++ b/arch/x86/include/asm/kvm_host.h +@@ -1306,6 +1306,7 @@ struct kvm_x86_init_ops { + int (*disabled_by_bios)(void); + int 
(*check_processor_compatibility)(void); + int (*hardware_setup)(void); ++ bool (*intel_pt_intr_in_guest)(void); + + struct kvm_x86_ops *runtime_ops; + }; +diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c +index 38c453f28f1f0..351ef5cf1436a 100644 +--- a/arch/x86/kvm/vmx/vmx.c ++++ b/arch/x86/kvm/vmx/vmx.c +@@ -7915,6 +7915,7 @@ static struct kvm_x86_init_ops vmx_init_ops __initdata = { + .disabled_by_bios = vmx_disabled_by_bios, + .check_processor_compatibility = vmx_check_processor_compat, + .hardware_setup = hardware_setup, ++ .intel_pt_intr_in_guest = vmx_pt_mode_is_host_guest, + + .runtime_ops = &vmx_x86_ops, + }; +diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c +index 4f828cac0273e..271669dc8d90a 100644 +--- a/arch/x86/kvm/x86.c ++++ b/arch/x86/kvm/x86.c +@@ -1229,7 +1229,7 @@ static const u32 msrs_to_save_all[] = { + MSR_IA32_UMWAIT_CONTROL, + + MSR_ARCH_PERFMON_FIXED_CTR0, MSR_ARCH_PERFMON_FIXED_CTR1, +- MSR_ARCH_PERFMON_FIXED_CTR0 + 2, MSR_ARCH_PERFMON_FIXED_CTR0 + 3, ++ MSR_ARCH_PERFMON_FIXED_CTR0 + 2, + MSR_CORE_PERF_FIXED_CTR_CTRL, MSR_CORE_PERF_GLOBAL_STATUS, + MSR_CORE_PERF_GLOBAL_CTRL, MSR_CORE_PERF_GLOBAL_OVF_CTRL, + MSR_ARCH_PERFMON_PERFCTR0, MSR_ARCH_PERFMON_PERFCTR1, +@@ -7882,7 +7882,7 @@ static struct perf_guest_info_callbacks kvm_guest_cbs = { + .is_in_guest = kvm_is_in_guest, + .is_user_mode = kvm_is_user_mode, + .get_guest_ip = kvm_get_guest_ip, +- .handle_intel_pt_intr = kvm_handle_intel_pt_intr, ++ .handle_intel_pt_intr = NULL, + }; + + #ifdef CONFIG_X86_64 +@@ -8005,6 +8005,8 @@ int kvm_arch_init(void *opaque) + PT_PRESENT_MASK, 0, sme_me_mask); + kvm_timer_init(); + ++ if (ops->intel_pt_intr_in_guest && ops->intel_pt_intr_in_guest()) ++ kvm_guest_cbs.handle_intel_pt_intr = kvm_handle_intel_pt_intr; + perf_register_guest_info_callbacks(&kvm_guest_cbs); + + if (boot_cpu_has(X86_FEATURE_XSAVE)) { +@@ -8042,6 +8044,7 @@ void kvm_arch_exit(void) + #endif + kvm_lapic_exit(); + perf_unregister_guest_info_callbacks(&kvm_guest_cbs); ++ kvm_guest_cbs.handle_intel_pt_intr = NULL; + + if (!boot_cpu_has(X86_FEATURE_CONSTANT_TSC)) + cpufreq_unregister_notifier(&kvmclock_cpufreq_notifier_block, +diff --git a/drivers/base/devtmpfs.c b/drivers/base/devtmpfs.c +index a71d141179439..b5cbaa61cbea7 100644 +--- a/drivers/base/devtmpfs.c ++++ b/drivers/base/devtmpfs.c +@@ -59,8 +59,15 @@ static struct dentry *public_dev_mount(struct file_system_type *fs_type, int fla + const char *dev_name, void *data) + { + struct super_block *s = mnt->mnt_sb; ++ int err; ++ + atomic_inc(&s->s_active); + down_write(&s->s_umount); ++ err = reconfigure_single(s, flags, data); ++ if (err < 0) { ++ deactivate_locked_super(s); ++ return ERR_PTR(err); ++ } + return dget(s->s_root); + } + +diff --git a/drivers/firmware/qemu_fw_cfg.c b/drivers/firmware/qemu_fw_cfg.c +index 172c751a4f6c2..f08e056ed0ae4 100644 +--- a/drivers/firmware/qemu_fw_cfg.c ++++ b/drivers/firmware/qemu_fw_cfg.c +@@ -388,9 +388,7 @@ static void fw_cfg_sysfs_cache_cleanup(void) + struct fw_cfg_sysfs_entry *entry, *next; + + list_for_each_entry_safe(entry, next, &fw_cfg_entry_cache, list) { +- /* will end up invoking fw_cfg_sysfs_cache_delist() +- * via each object's release() method (i.e. 
destructor) +- */ ++ fw_cfg_sysfs_cache_delist(entry); + kobject_put(&entry->kobj); + } + } +@@ -448,7 +446,6 @@ static void fw_cfg_sysfs_release_entry(struct kobject *kobj) + { + struct fw_cfg_sysfs_entry *entry = to_entry(kobj); + +- fw_cfg_sysfs_cache_delist(entry); + kfree(entry); + } + +@@ -601,20 +598,18 @@ static int fw_cfg_register_file(const struct fw_cfg_file *f) + /* set file entry information */ + entry->size = be32_to_cpu(f->size); + entry->select = be16_to_cpu(f->select); +- memcpy(entry->name, f->name, FW_CFG_MAX_FILE_PATH); ++ strscpy(entry->name, f->name, FW_CFG_MAX_FILE_PATH); + + /* register entry under "/sys/firmware/qemu_fw_cfg/by_key/" */ + err = kobject_init_and_add(&entry->kobj, &fw_cfg_sysfs_entry_ktype, + fw_cfg_sel_ko, "%d", entry->select); +- if (err) { +- kobject_put(&entry->kobj); +- return err; +- } ++ if (err) ++ goto err_put_entry; + + /* add raw binary content access */ + err = sysfs_create_bin_file(&entry->kobj, &fw_cfg_sysfs_attr_raw); + if (err) +- goto err_add_raw; ++ goto err_del_entry; + + /* try adding "/sys/firmware/qemu_fw_cfg/by_name/" symlink */ + fw_cfg_build_symlink(fw_cfg_fname_kset, &entry->kobj, entry->name); +@@ -623,9 +618,10 @@ static int fw_cfg_register_file(const struct fw_cfg_file *f) + fw_cfg_sysfs_cache_enlist(entry); + return 0; + +-err_add_raw: ++err_del_entry: + kobject_del(&entry->kobj); +- kfree(entry); ++err_put_entry: ++ kobject_put(&entry->kobj); + return err; + } + +diff --git a/drivers/media/usb/uvc/uvc_video.c b/drivers/media/usb/uvc/uvc_video.c +index b8477fa93b7d7..f6373d678d256 100644 +--- a/drivers/media/usb/uvc/uvc_video.c ++++ b/drivers/media/usb/uvc/uvc_video.c +@@ -1915,6 +1915,10 @@ static int uvc_video_start_transfer(struct uvc_streaming *stream, + if (ep == NULL) + return -EIO; + ++ /* Reject broken descriptors. */ ++ if (usb_endpoint_maxp(&ep->desc) == 0) ++ return -EIO; ++ + ret = uvc_init_video_bulk(stream, ep, gfp_flags); + } + +diff --git a/drivers/mtd/chips/Kconfig b/drivers/mtd/chips/Kconfig +index aef14990e5f7c..19726ebd973d0 100644 +--- a/drivers/mtd/chips/Kconfig ++++ b/drivers/mtd/chips/Kconfig +@@ -55,12 +55,14 @@ choice + LITTLE_ENDIAN_BYTE, if the bytes are reversed. 
+ + config MTD_CFI_NOSWAP ++ depends on !ARCH_IXP4XX || CPU_BIG_ENDIAN + bool "NO" + + config MTD_CFI_BE_BYTE_SWAP + bool "BIG_ENDIAN_BYTE" + + config MTD_CFI_LE_BYTE_SWAP ++ depends on !ARCH_IXP4XX + bool "LITTLE_ENDIAN_BYTE" + + endchoice +diff --git a/drivers/mtd/maps/Kconfig b/drivers/mtd/maps/Kconfig +index 6650acbc961e9..fc0aaa03c5242 100644 +--- a/drivers/mtd/maps/Kconfig ++++ b/drivers/mtd/maps/Kconfig +@@ -325,7 +325,7 @@ config MTD_DC21285 + + config MTD_IXP4XX + tristate "CFI Flash device mapped on Intel IXP4xx based systems" +- depends on MTD_CFI && MTD_COMPLEX_MAPPINGS && ARCH_IXP4XX ++ depends on MTD_CFI && MTD_COMPLEX_MAPPINGS && ARCH_IXP4XX && MTD_CFI_ADV_OPTIONS + help + This enables MTD access to flash devices on platforms based + on Intel's IXP4xx family of network processors such as the +diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/hw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/hw.c +index 6312fddd9c00a..eaba661133280 100644 +--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/hw.c ++++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/hw.c +@@ -1000,6 +1000,7 @@ int rtl92cu_hw_init(struct ieee80211_hw *hw) + _initpabias(hw); + rtl92c_dm_init(hw); + exit: ++ local_irq_disable(); + local_irq_restore(flags); + return err; + } +diff --git a/drivers/remoteproc/qcom_pil_info.c b/drivers/remoteproc/qcom_pil_info.c +index 7c007dd7b2000..aca21560e20b8 100644 +--- a/drivers/remoteproc/qcom_pil_info.c ++++ b/drivers/remoteproc/qcom_pil_info.c +@@ -104,7 +104,7 @@ int qcom_pil_info_store(const char *image, phys_addr_t base, size_t size) + return -ENOMEM; + + found_unused: +- memcpy_toio(entry, image, PIL_RELOC_NAME_LEN); ++ memcpy_toio(entry, image, strnlen(image, PIL_RELOC_NAME_LEN)); + found_existing: + /* Use two writel() as base is only aligned to 4 bytes on odd entries */ + writel(base, entry + PIL_RELOC_NAME_LEN); +diff --git a/drivers/video/fbdev/vga16fb.c b/drivers/video/fbdev/vga16fb.c +index 1e8a38a7967d8..5c6e9dc88060b 100644 +--- a/drivers/video/fbdev/vga16fb.c ++++ b/drivers/video/fbdev/vga16fb.c +@@ -184,6 +184,25 @@ static inline void setindex(int index) + vga_io_w(VGA_GFX_I, index); + } + ++/* Check if the video mode is supported by the driver */ ++static inline int check_mode_supported(void) ++{ ++ /* non-x86 architectures treat orig_video_isVGA as a boolean flag */ ++#if defined(CONFIG_X86) ++ /* only EGA and VGA in 16 color graphic mode are supported */ ++ if (screen_info.orig_video_isVGA != VIDEO_TYPE_EGAC && ++ screen_info.orig_video_isVGA != VIDEO_TYPE_VGAC) ++ return -ENODEV; ++ ++ if (screen_info.orig_video_mode != 0x0D && /* 320x200/4 (EGA) */ ++ screen_info.orig_video_mode != 0x0E && /* 640x200/4 (EGA) */ ++ screen_info.orig_video_mode != 0x10 && /* 640x350/4 (EGA) */ ++ screen_info.orig_video_mode != 0x12) /* 640x480/4 (VGA) */ ++ return -ENODEV; ++#endif ++ return 0; ++} ++ + static void vga16fb_pan_var(struct fb_info *info, + struct fb_var_screeninfo *var) + { +@@ -1422,6 +1441,11 @@ static int __init vga16fb_init(void) + + vga16fb_setup(option); + #endif ++ ++ ret = check_mode_supported(); ++ if (ret) ++ return ret; ++ + ret = platform_driver_register(&vga16fb_driver); + + if (!ret) { +diff --git a/fs/9p/vfs_inode_dotl.c b/fs/9p/vfs_inode_dotl.c +index 72b67d810b8c2..a13ef836fe4e1 100644 +--- a/fs/9p/vfs_inode_dotl.c ++++ b/fs/9p/vfs_inode_dotl.c +@@ -541,7 +541,10 @@ int v9fs_vfs_setattr_dotl(struct dentry *dentry, struct iattr *iattr) + { + int retval; + struct p9_fid *fid = NULL; +- struct p9_iattr_dotl p9attr; ++ struct 
p9_iattr_dotl p9attr = { ++ .uid = INVALID_UID, ++ .gid = INVALID_GID, ++ }; + struct inode *inode = d_inode(dentry); + + p9_debug(P9_DEBUG_VFS, "\n"); +@@ -551,14 +554,22 @@ int v9fs_vfs_setattr_dotl(struct dentry *dentry, struct iattr *iattr) + return retval; + + p9attr.valid = v9fs_mapped_iattr_valid(iattr->ia_valid); +- p9attr.mode = iattr->ia_mode; +- p9attr.uid = iattr->ia_uid; +- p9attr.gid = iattr->ia_gid; +- p9attr.size = iattr->ia_size; +- p9attr.atime_sec = iattr->ia_atime.tv_sec; +- p9attr.atime_nsec = iattr->ia_atime.tv_nsec; +- p9attr.mtime_sec = iattr->ia_mtime.tv_sec; +- p9attr.mtime_nsec = iattr->ia_mtime.tv_nsec; ++ if (iattr->ia_valid & ATTR_MODE) ++ p9attr.mode = iattr->ia_mode; ++ if (iattr->ia_valid & ATTR_UID) ++ p9attr.uid = iattr->ia_uid; ++ if (iattr->ia_valid & ATTR_GID) ++ p9attr.gid = iattr->ia_gid; ++ if (iattr->ia_valid & ATTR_SIZE) ++ p9attr.size = iattr->ia_size; ++ if (iattr->ia_valid & ATTR_ATIME_SET) { ++ p9attr.atime_sec = iattr->ia_atime.tv_sec; ++ p9attr.atime_nsec = iattr->ia_atime.tv_nsec; ++ } ++ if (iattr->ia_valid & ATTR_MTIME_SET) { ++ p9attr.mtime_sec = iattr->ia_mtime.tv_sec; ++ p9attr.mtime_nsec = iattr->ia_mtime.tv_nsec; ++ } + + if (iattr->ia_valid & ATTR_FILE) { + fid = iattr->ia_file->private_data; +diff --git a/fs/fs_context.c b/fs/fs_context.c +index 2834d1afa6e80..b11677802ee13 100644 +--- a/fs/fs_context.c ++++ b/fs/fs_context.c +@@ -530,7 +530,7 @@ static int legacy_parse_param(struct fs_context *fc, struct fs_parameter *param) + param->key); + } + +- if (len > PAGE_SIZE - 2 - size) ++ if (size + len + 2 > PAGE_SIZE) + return invalf(fc, "VFS: Legacy: Cumulative options too large"); + if (strchr(param->key, ',') || + (param->type == fs_value_is_string && +diff --git a/fs/orangefs/orangefs-bufmap.c b/fs/orangefs/orangefs-bufmap.c +index 538e839590ef5..b501dc07f9222 100644 +--- a/fs/orangefs/orangefs-bufmap.c ++++ b/fs/orangefs/orangefs-bufmap.c +@@ -176,7 +176,7 @@ orangefs_bufmap_free(struct orangefs_bufmap *bufmap) + { + kfree(bufmap->page_array); + kfree(bufmap->desc_array); +- kfree(bufmap->buffer_index_array); ++ bitmap_free(bufmap->buffer_index_array); + kfree(bufmap); + } + +@@ -226,8 +226,7 @@ orangefs_bufmap_alloc(struct ORANGEFS_dev_map_desc *user_desc) + bufmap->desc_size = user_desc->size; + bufmap->desc_shift = ilog2(bufmap->desc_size); + +- bufmap->buffer_index_array = +- kzalloc(DIV_ROUND_UP(bufmap->desc_count, BITS_PER_LONG), GFP_KERNEL); ++ bufmap->buffer_index_array = bitmap_zalloc(bufmap->desc_count, GFP_KERNEL); + if (!bufmap->buffer_index_array) + goto out_free_bufmap; + +@@ -250,7 +249,7 @@ orangefs_bufmap_alloc(struct ORANGEFS_dev_map_desc *user_desc) + out_free_desc_array: + kfree(bufmap->desc_array); + out_free_index_array: +- kfree(bufmap->buffer_index_array); ++ bitmap_free(bufmap->buffer_index_array); + out_free_bufmap: + kfree(bufmap); + out: +diff --git a/fs/super.c b/fs/super.c +index 98bb0629ee108..20f1707807bbd 100644 +--- a/fs/super.c ++++ b/fs/super.c +@@ -1472,8 +1472,8 @@ struct dentry *mount_nodev(struct file_system_type *fs_type, + } + EXPORT_SYMBOL(mount_nodev); + +-static int reconfigure_single(struct super_block *s, +- int flags, void *data) ++int reconfigure_single(struct super_block *s, ++ int flags, void *data) + { + struct fs_context *fc; + int ret; +diff --git a/include/linux/fs_context.h b/include/linux/fs_context.h +index 5b44b0195a28a..e869ce3ae6600 100644 +--- a/include/linux/fs_context.h ++++ b/include/linux/fs_context.h +@@ -140,6 +140,8 @@ extern int generic_parse_monolithic(struct 
fs_context *fc, void *data); + extern int vfs_get_tree(struct fs_context *fc); + extern void put_fs_context(struct fs_context *fc); + extern void fc_drop_locked(struct fs_context *fc); ++int reconfigure_single(struct super_block *s, ++ int flags, void *data); + + /* + * sget() wrappers to be called from the ->get_tree() op. +diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h +index ce14fb2772b5b..c94551091dad3 100644 +--- a/include/linux/perf_event.h ++++ b/include/linux/perf_event.h +@@ -1235,7 +1235,18 @@ extern void perf_event_bpf_event(struct bpf_prog *prog, + enum perf_bpf_event_type type, + u16 flags); + +-extern struct perf_guest_info_callbacks *perf_guest_cbs; ++extern struct perf_guest_info_callbacks __rcu *perf_guest_cbs; ++static inline struct perf_guest_info_callbacks *perf_get_guest_cbs(void) ++{ ++ /* ++ * Callbacks are RCU-protected and must be READ_ONCE to avoid reloading ++ * the callbacks between a !NULL check and dereferences, to ensure ++ * pending stores/changes to the callback pointers are visible before a ++ * non-NULL perf_guest_cbs is visible to readers, and to prevent a ++ * module from unloading callbacks while readers are active. ++ */ ++ return rcu_dereference(perf_guest_cbs); ++} + extern int perf_register_guest_info_callbacks(struct perf_guest_info_callbacks *callbacks); + extern int perf_unregister_guest_info_callbacks(struct perf_guest_info_callbacks *callbacks); + +diff --git a/kernel/events/core.c b/kernel/events/core.c +index 639b99a318db1..e2d774cc470ee 100644 +--- a/kernel/events/core.c ++++ b/kernel/events/core.c +@@ -6395,18 +6395,25 @@ static void perf_pending_event(struct irq_work *entry) + * Later on, we might change it to a list if there is + * another virtualization implementation supporting the callbacks. 
+ */ +-struct perf_guest_info_callbacks *perf_guest_cbs; ++struct perf_guest_info_callbacks __rcu *perf_guest_cbs; + + int perf_register_guest_info_callbacks(struct perf_guest_info_callbacks *cbs) + { +- perf_guest_cbs = cbs; ++ if (WARN_ON_ONCE(rcu_access_pointer(perf_guest_cbs))) ++ return -EBUSY; ++ ++ rcu_assign_pointer(perf_guest_cbs, cbs); + return 0; + } + EXPORT_SYMBOL_GPL(perf_register_guest_info_callbacks); + + int perf_unregister_guest_info_callbacks(struct perf_guest_info_callbacks *cbs) + { +- perf_guest_cbs = NULL; ++ if (WARN_ON_ONCE(rcu_access_pointer(perf_guest_cbs) != cbs)) ++ return -EINVAL; ++ ++ rcu_assign_pointer(perf_guest_cbs, NULL); ++ synchronize_rcu(); + return 0; + } + EXPORT_SYMBOL_GPL(perf_unregister_guest_info_callbacks); +diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c +index 14ce48f1a8e47..a858bb9e99270 100644 +--- a/sound/pci/hda/patch_realtek.c ++++ b/sound/pci/hda/patch_realtek.c +@@ -1936,6 +1936,7 @@ enum { + ALC887_FIXUP_ASUS_BASS, + ALC887_FIXUP_BASS_CHMAP, + ALC1220_FIXUP_GB_DUAL_CODECS, ++ ALC1220_FIXUP_GB_X570, + ALC1220_FIXUP_CLEVO_P950, + ALC1220_FIXUP_CLEVO_PB51ED, + ALC1220_FIXUP_CLEVO_PB51ED_PINS, +@@ -2125,6 +2126,29 @@ static void alc1220_fixup_gb_dual_codecs(struct hda_codec *codec, + } + } + ++static void alc1220_fixup_gb_x570(struct hda_codec *codec, ++ const struct hda_fixup *fix, ++ int action) ++{ ++ static const hda_nid_t conn1[] = { 0x0c }; ++ static const struct coef_fw gb_x570_coefs[] = { ++ WRITE_COEF(0x1a, 0x01c1), ++ WRITE_COEF(0x1b, 0x0202), ++ WRITE_COEF(0x43, 0x3005), ++ {} ++ }; ++ ++ switch (action) { ++ case HDA_FIXUP_ACT_PRE_PROBE: ++ snd_hda_override_conn_list(codec, 0x14, ARRAY_SIZE(conn1), conn1); ++ snd_hda_override_conn_list(codec, 0x1b, ARRAY_SIZE(conn1), conn1); ++ break; ++ case HDA_FIXUP_ACT_INIT: ++ alc_process_coef_fw(codec, gb_x570_coefs); ++ break; ++ } ++} ++ + static void alc1220_fixup_clevo_p950(struct hda_codec *codec, + const struct hda_fixup *fix, + int action) +@@ -2427,6 +2451,10 @@ static const struct hda_fixup alc882_fixups[] = { + .type = HDA_FIXUP_FUNC, + .v.func = alc1220_fixup_gb_dual_codecs, + }, ++ [ALC1220_FIXUP_GB_X570] = { ++ .type = HDA_FIXUP_FUNC, ++ .v.func = alc1220_fixup_gb_x570, ++ }, + [ALC1220_FIXUP_CLEVO_P950] = { + .type = HDA_FIXUP_FUNC, + .v.func = alc1220_fixup_clevo_p950, +@@ -2529,7 +2557,7 @@ static const struct snd_pci_quirk alc882_fixup_tbl[] = { + SND_PCI_QUIRK(0x13fe, 0x1009, "Advantech MIT-W101", ALC886_FIXUP_EAPD), + SND_PCI_QUIRK(0x1458, 0xa002, "Gigabyte EP45-DS3/Z87X-UD3H", ALC889_FIXUP_FRONT_HP_NO_PRESENCE), + SND_PCI_QUIRK(0x1458, 0xa0b8, "Gigabyte AZ370-Gaming", ALC1220_FIXUP_GB_DUAL_CODECS), +- SND_PCI_QUIRK(0x1458, 0xa0cd, "Gigabyte X570 Aorus Master", ALC1220_FIXUP_CLEVO_P950), ++ SND_PCI_QUIRK(0x1458, 0xa0cd, "Gigabyte X570 Aorus Master", ALC1220_FIXUP_GB_X570), + SND_PCI_QUIRK(0x1458, 0xa0ce, "Gigabyte X570 Aorus Xtreme", ALC1220_FIXUP_CLEVO_P950), + SND_PCI_QUIRK(0x1462, 0x11f7, "MSI-GE63", ALC1220_FIXUP_CLEVO_P950), + SND_PCI_QUIRK(0x1462, 0x1228, "MSI-GP63", ALC1220_FIXUP_CLEVO_P950), +@@ -6729,6 +6757,8 @@ enum { + ALC256_FIXUP_SYSTEM76_MIC_NO_PRESENCE, + ALC233_FIXUP_NO_AUDIO_JACK, + ALC256_FIXUP_MIC_NO_PRESENCE_AND_RESUME, ++ ALC285_FIXUP_LEGION_Y9000X_SPEAKERS, ++ ALC285_FIXUP_LEGION_Y9000X_AUTOMUTE, + }; + + static const struct hda_fixup alc269_fixups[] = { +@@ -8319,6 +8349,18 @@ static const struct hda_fixup alc269_fixups[] = { + .chained = true, + .chain_id = ALC236_FIXUP_HP_MUTE_LED_MICMUTE_VREF, + }, ++ 
[ALC285_FIXUP_LEGION_Y9000X_SPEAKERS] = { ++ .type = HDA_FIXUP_FUNC, ++ .v.func = alc285_fixup_ideapad_s740_coef, ++ .chained = true, ++ .chain_id = ALC285_FIXUP_LEGION_Y9000X_AUTOMUTE, ++ }, ++ [ALC285_FIXUP_LEGION_Y9000X_AUTOMUTE] = { ++ .type = HDA_FIXUP_FUNC, ++ .v.func = alc287_fixup_legion_15imhg05_speakers, ++ .chained = true, ++ .chain_id = ALC269_FIXUP_THINKPAD_ACPI, ++ }, + [ALC287_FIXUP_LEGION_15IMHG05_SPEAKERS] = { + .type = HDA_FIXUP_VERBS, + //.v.verbs = legion_15imhg05_coefs, +@@ -8857,13 +8899,16 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = { + SND_PCI_QUIRK(0x17aa, 0x3176, "ThinkCentre Station", ALC283_FIXUP_HEADSET_MIC), + SND_PCI_QUIRK(0x17aa, 0x3178, "ThinkCentre Station", ALC283_FIXUP_HEADSET_MIC), + SND_PCI_QUIRK(0x17aa, 0x31af, "ThinkCentre Station", ALC623_FIXUP_LENOVO_THINKSTATION_P340), ++ SND_PCI_QUIRK(0x17aa, 0x3813, "Legion 7i 15IMHG05", ALC287_FIXUP_LEGION_15IMHG05_SPEAKERS), + SND_PCI_QUIRK(0x17aa, 0x3818, "Lenovo C940", ALC298_FIXUP_LENOVO_SPK_VOLUME), ++ SND_PCI_QUIRK(0x17aa, 0x3819, "Lenovo 13s Gen2 ITL", ALC287_FIXUP_13S_GEN2_SPEAKERS), ++ SND_PCI_QUIRK(0x17aa, 0x3824, "Legion Y9000X 2020", ALC285_FIXUP_LEGION_Y9000X_SPEAKERS), + SND_PCI_QUIRK(0x17aa, 0x3827, "Ideapad S740", ALC285_FIXUP_IDEAPAD_S740_COEF), ++ SND_PCI_QUIRK(0x17aa, 0x3834, "Lenovo IdeaPad Slim 9i 14ITL5", ALC287_FIXUP_YOGA7_14ITL_SPEAKERS), + SND_PCI_QUIRK(0x17aa, 0x3843, "Yoga 9i", ALC287_FIXUP_IDEAPAD_BASS_SPK_AMP), +- SND_PCI_QUIRK(0x17aa, 0x3813, "Legion 7i 15IMHG05", ALC287_FIXUP_LEGION_15IMHG05_SPEAKERS), ++ SND_PCI_QUIRK(0x17aa, 0x384a, "Lenovo Yoga 7 15ITL5", ALC287_FIXUP_YOGA7_14ITL_SPEAKERS), + SND_PCI_QUIRK(0x17aa, 0x3852, "Lenovo Yoga 7 14ITL5", ALC287_FIXUP_YOGA7_14ITL_SPEAKERS), + SND_PCI_QUIRK(0x17aa, 0x3853, "Lenovo Yoga 7 15ITL5", ALC287_FIXUP_YOGA7_14ITL_SPEAKERS), +- SND_PCI_QUIRK(0x17aa, 0x3819, "Lenovo 13s Gen2 ITL", ALC287_FIXUP_13S_GEN2_SPEAKERS), + SND_PCI_QUIRK(0x17aa, 0x3902, "Lenovo E50-80", ALC269_FIXUP_DMIC_THINKPAD_ACPI), + SND_PCI_QUIRK(0x17aa, 0x3977, "IdeaPad S210", ALC283_FIXUP_INT_MIC), + SND_PCI_QUIRK(0x17aa, 0x3978, "Lenovo B50-70", ALC269_FIXUP_DMIC_THINKPAD_ACPI),
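The per-architecture perf_callchain/perf_misc_flags hunks above all apply one pattern: take a single snapshot of the guest-callback table via perf_get_guest_cbs() and perform both the NULL check and every dereference on that snapshot, instead of re-reading the global perf_guest_cbs each time. The following is a minimal userspace C model of why the snapshot matters; the type and variable names (guest_callbacks, registered_cbs) are hypothetical stand-ins, not kernel API, and the kernel additionally protects the real pointer with RCU.

#include <stdio.h>

/* Hypothetical stand-in for struct perf_guest_info_callbacks. */
struct guest_callbacks {
	int (*is_in_guest)(void);
	unsigned long (*get_guest_ip)(void);
};

/* Shared pointer; another thread may set it to NULL (unregister) at any time. */
static struct guest_callbacks *volatile registered_cbs;

/* Old pattern: the global is read twice, so it can become NULL between the
 * check and the dereference, or point at an already-unloaded module. */
static unsigned long guest_ip_racy(unsigned long host_ip)
{
	if (registered_cbs && registered_cbs->is_in_guest())
		return registered_cbs->get_guest_ip();	/* reloads the pointer */
	return host_ip;
}

/* New pattern: snapshot once, then check and dereference only the snapshot. */
static unsigned long guest_ip_safe(unsigned long host_ip)
{
	struct guest_callbacks *cbs = registered_cbs;	/* single load */

	if (cbs && cbs->is_in_guest())
		return cbs->get_guest_ip();
	return host_ip;
}

static int fake_in_guest(void)           { return 1; }
static unsigned long fake_guest_ip(void) { return 0xdeadbeefUL; }

int main(void)
{
	static struct guest_callbacks kvm_cbs = { fake_in_guest, fake_guest_ip };

	registered_cbs = &kvm_cbs;
	printf("racy: %#lx\n", guest_ip_racy(0x1000));
	printf("safe: %#lx\n", guest_ip_safe(0x1000));
	return 0;
}

In the kernel hunks the snapshot is an rcu_dereference(), and perf_unregister_guest_info_callbacks() now ends with synchronize_rcu(), which is what lets KVM clear its callbacks (the arch/x86/kvm/x86.c hunk) without a reader tripping over a half-torn-down table.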
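The orangefs-bufmap hunk swaps a hand-rolled kzalloc(DIV_ROUND_UP(desc_count, BITS_PER_LONG), ...) for bitmap_zalloc(desc_count, ...): the old expression computes a count of longs but hands it to an allocator that expects bytes, so the index bitmap was under-sized by a factor of sizeof(long). Below is a small standalone C illustration of the two size calculations; the helper names are hypothetical, not kernel API.

#include <stdio.h>
#include <limits.h>

#define BITS_PER_LONG		(sizeof(long) * CHAR_BIT)
#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

/* Buggy: number of longs needed, mistakenly passed to the allocator as bytes. */
static size_t index_array_bytes_buggy(size_t nbits)
{
	return DIV_ROUND_UP(nbits, BITS_PER_LONG);
}

/* Correct: what bitmap_zalloc() allocates - whole longs, expressed in bytes. */
static size_t index_array_bytes_correct(size_t nbits)
{
	return DIV_ROUND_UP(nbits, BITS_PER_LONG) * sizeof(long);
}

int main(void)
{
	size_t nbits = 256;	/* e.g. a desc_count supplied via ORANGEFS_dev_map_desc */

	printf("%zu bits: buggy size = %zu bytes, correct size = %zu bytes\n",
	       nbits, index_array_bytes_buggy(nbits), index_array_bytes_correct(nbits));
	return 0;
}

Using bitmap_zalloc()/bitmap_free() also pairs allocation and release symmetrically, which is why the error path and orangefs_bufmap_free() change together in the same hunk.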
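The fs/fs_context.c hunk rewrites the legacy mount-option length check from `len > PAGE_SIZE - 2 - size` to `size + len + 2 > PAGE_SIZE`. With unsigned size_t arithmetic the old form wraps around once `size` exceeds PAGE_SIZE - 2, turning the limit into a huge value and letting the check pass when it should fail; the additive form cannot wrap for these operands. A compact userspace model follows; the PAGE_SIZE value and the exaggerated inputs are illustrative only.

#include <stdio.h>
#include <stddef.h>

#define PAGE_SIZE	4096u

/* Old form: PAGE_SIZE - 2 - size wraps around when size > PAGE_SIZE - 2. */
static int reject_old(size_t size, size_t len)
{
	return len > PAGE_SIZE - 2 - size;
}

/* New form: keep everything additive so nothing can wrap. */
static int reject_new(size_t size, size_t len)
{
	return size + len + 2 > PAGE_SIZE;
}

int main(void)
{
	size_t size = 5000;	/* options accumulated so far, already past one page */
	size_t len = 16;	/* next "key,value" chunk */

	printf("old check rejects: %d\n", reject_old(size, len));	/* prints 0 */
	printf("new check rejects: %d\n", reject_new(size, len));	/* prints 1 */
	return 0;
}

Keeping a bound check in the additive form `a + b > limit` (where the sum itself cannot overflow) is the usual way to avoid this class of unsigned-underflow bug.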
