commit:     bac505d232815166e38c0d9ca27763fb12fc664d
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Thu Jan 20 13:45:08 2022 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Thu Jan 20 13:45:08 2022 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=bac505d2

Linux patch 5.16.2

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

 0000_README             |    4 +
 1001_linux-5.16.2.patch | 1219 +++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 1223 insertions(+)

diff --git a/0000_README b/0000_README
index 4a1aa215..41c5d786 100644
--- a/0000_README
+++ b/0000_README
@@ -47,6 +47,10 @@ Patch:  1000_linux-5.16.1.patch
 From:   http://www.kernel.org
 Desc:   Linux 5.16.1
 
+Patch:  1001_linux-5.16.2.patch
+From:   http://www.kernel.org
+Desc:   Linux 5.16.2
+
 Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.

diff --git a/1001_linux-5.16.2.patch b/1001_linux-5.16.2.patch
new file mode 100644
index 00000000..33650f08
--- /dev/null
+++ b/1001_linux-5.16.2.patch
@@ -0,0 +1,1219 @@
+diff --git a/Makefile b/Makefile
+index fdbd06daf2af1..dd98debc26048 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 5
+ PATCHLEVEL = 16
+-SUBLEVEL = 1
++SUBLEVEL = 2
+ EXTRAVERSION =
+ NAME = Gobble Gobble
+ 
+diff --git a/arch/arm/kernel/perf_callchain.c b/arch/arm/kernel/perf_callchain.c
+index 3b69a76d341e7..1626dfc6f6ce6 100644
+--- a/arch/arm/kernel/perf_callchain.c
++++ b/arch/arm/kernel/perf_callchain.c
+@@ -62,9 +62,10 @@ user_backtrace(struct frame_tail __user *tail,
+ void
+ perf_callchain_user(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs)
+ {
++      struct perf_guest_info_callbacks *guest_cbs = perf_get_guest_cbs();
+       struct frame_tail __user *tail;
+ 
+-      if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) {
++      if (guest_cbs && guest_cbs->is_in_guest()) {
+               /* We don't support guest os callchain now */
+               return;
+       }
+@@ -98,9 +99,10 @@ callchain_trace(struct stackframe *fr,
+ void
+ perf_callchain_kernel(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs)
+ {
++      struct perf_guest_info_callbacks *guest_cbs = perf_get_guest_cbs();
+       struct stackframe fr;
+ 
+-      if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) {
++      if (guest_cbs && guest_cbs->is_in_guest()) {
+               /* We don't support guest os callchain now */
+               return;
+       }
+@@ -111,18 +113,21 @@ perf_callchain_kernel(struct perf_callchain_entry_ctx *entry, struct pt_regs *re
+ 
+ unsigned long perf_instruction_pointer(struct pt_regs *regs)
+ {
+-      if (perf_guest_cbs && perf_guest_cbs->is_in_guest())
+-              return perf_guest_cbs->get_guest_ip();
++      struct perf_guest_info_callbacks *guest_cbs = perf_get_guest_cbs();
++
++      if (guest_cbs && guest_cbs->is_in_guest())
++              return guest_cbs->get_guest_ip();
+ 
+       return instruction_pointer(regs);
+ }
+ 
+ unsigned long perf_misc_flags(struct pt_regs *regs)
+ {
++      struct perf_guest_info_callbacks *guest_cbs = perf_get_guest_cbs();
+       int misc = 0;
+ 
+-      if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) {
+-              if (perf_guest_cbs->is_user_mode())
++      if (guest_cbs && guest_cbs->is_in_guest()) {
++              if (guest_cbs->is_user_mode())
+                       misc |= PERF_RECORD_MISC_GUEST_USER;
+               else
+                       misc |= PERF_RECORD_MISC_GUEST_KERNEL;
+diff --git a/arch/arm64/kernel/perf_callchain.c b/arch/arm64/kernel/perf_callchain.c
+index 4a72c27273097..86d9f20131723 100644
+--- a/arch/arm64/kernel/perf_callchain.c
++++ b/arch/arm64/kernel/perf_callchain.c
+@@ -102,7 +102,9 @@ compat_user_backtrace(struct compat_frame_tail __user *tail,
+ void perf_callchain_user(struct perf_callchain_entry_ctx *entry,
+                        struct pt_regs *regs)
+ {
+-      if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) {
++      struct perf_guest_info_callbacks *guest_cbs = perf_get_guest_cbs();
++
++      if (guest_cbs && guest_cbs->is_in_guest()) {
+               /* We don't support guest os callchain now */
+               return;
+       }
+@@ -147,9 +149,10 @@ static bool callchain_trace(void *data, unsigned long pc)
+ void perf_callchain_kernel(struct perf_callchain_entry_ctx *entry,
+                          struct pt_regs *regs)
+ {
++      struct perf_guest_info_callbacks *guest_cbs = perf_get_guest_cbs();
+       struct stackframe frame;
+ 
+-      if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) {
++      if (guest_cbs && guest_cbs->is_in_guest()) {
+               /* We don't support guest os callchain now */
+               return;
+       }
+@@ -160,18 +163,21 @@ void perf_callchain_kernel(struct perf_callchain_entry_ctx *entry,
+ 
+ unsigned long perf_instruction_pointer(struct pt_regs *regs)
+ {
+-      if (perf_guest_cbs && perf_guest_cbs->is_in_guest())
+-              return perf_guest_cbs->get_guest_ip();
++      struct perf_guest_info_callbacks *guest_cbs = perf_get_guest_cbs();
++
++      if (guest_cbs && guest_cbs->is_in_guest())
++              return guest_cbs->get_guest_ip();
+ 
+       return instruction_pointer(regs);
+ }
+ 
+ unsigned long perf_misc_flags(struct pt_regs *regs)
+ {
++      struct perf_guest_info_callbacks *guest_cbs = perf_get_guest_cbs();
+       int misc = 0;
+ 
+-      if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) {
+-              if (perf_guest_cbs->is_user_mode())
++      if (guest_cbs && guest_cbs->is_in_guest()) {
++              if (guest_cbs->is_user_mode())
+                       misc |= PERF_RECORD_MISC_GUEST_USER;
+               else
+                       misc |= PERF_RECORD_MISC_GUEST_KERNEL;
+diff --git a/arch/csky/kernel/perf_callchain.c b/arch/csky/kernel/perf_callchain.c
+index ab55e98ee8f62..35318a635a5fa 100644
+--- a/arch/csky/kernel/perf_callchain.c
++++ b/arch/csky/kernel/perf_callchain.c
+@@ -86,10 +86,11 @@ static unsigned long user_backtrace(struct perf_callchain_entry_ctx *entry,
+ void perf_callchain_user(struct perf_callchain_entry_ctx *entry,
+                        struct pt_regs *regs)
+ {
++      struct perf_guest_info_callbacks *guest_cbs = perf_get_guest_cbs();
+       unsigned long fp = 0;
+ 
+       /* C-SKY does not support virtualization. */
+-      if (perf_guest_cbs && perf_guest_cbs->is_in_guest())
++      if (guest_cbs && guest_cbs->is_in_guest())
+               return;
+ 
+       fp = regs->regs[4];
+@@ -110,10 +111,11 @@ void perf_callchain_user(struct perf_callchain_entry_ctx *entry,
+ void perf_callchain_kernel(struct perf_callchain_entry_ctx *entry,
+                          struct pt_regs *regs)
+ {
++      struct perf_guest_info_callbacks *guest_cbs = perf_get_guest_cbs();
+       struct stackframe fr;
+ 
+       /* C-SKY does not support virtualization. */
+-      if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) {
++      if (guest_cbs && guest_cbs->is_in_guest()) {
+               pr_warn("C-SKY does not support perf in guest mode!");
+               return;
+       }
+diff --git a/arch/nds32/kernel/perf_event_cpu.c b/arch/nds32/kernel/perf_event_cpu.c
+index 0ce6f9f307e6a..f387919607813 100644
+--- a/arch/nds32/kernel/perf_event_cpu.c
++++ b/arch/nds32/kernel/perf_event_cpu.c
+@@ -1363,6 +1363,7 @@ void
+ perf_callchain_user(struct perf_callchain_entry_ctx *entry,
+                   struct pt_regs *regs)
+ {
++      struct perf_guest_info_callbacks *guest_cbs = perf_get_guest_cbs();
+       unsigned long fp = 0;
+       unsigned long gp = 0;
+       unsigned long lp = 0;
+@@ -1371,7 +1372,7 @@ perf_callchain_user(struct perf_callchain_entry_ctx *entry,
+ 
+       leaf_fp = 0;
+ 
+-      if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) {
++      if (guest_cbs && guest_cbs->is_in_guest()) {
+               /* We don't support guest os callchain now */
+               return;
+       }
+@@ -1479,9 +1480,10 @@ void
+ perf_callchain_kernel(struct perf_callchain_entry_ctx *entry,
+                     struct pt_regs *regs)
+ {
++      struct perf_guest_info_callbacks *guest_cbs = perf_get_guest_cbs();
+       struct stackframe fr;
+ 
+-      if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) {
++      if (guest_cbs && guest_cbs->is_in_guest()) {
+               /* We don't support guest os callchain now */
+               return;
+       }
+@@ -1493,20 +1495,23 @@ perf_callchain_kernel(struct perf_callchain_entry_ctx *entry,
+ 
+ unsigned long perf_instruction_pointer(struct pt_regs *regs)
+ {
++      struct perf_guest_info_callbacks *guest_cbs = perf_get_guest_cbs();
++
+       /* However, NDS32 does not support virtualization */
+-      if (perf_guest_cbs && perf_guest_cbs->is_in_guest())
+-              return perf_guest_cbs->get_guest_ip();
++      if (guest_cbs && guest_cbs->is_in_guest())
++              return guest_cbs->get_guest_ip();
+ 
+       return instruction_pointer(regs);
+ }
+ 
+ unsigned long perf_misc_flags(struct pt_regs *regs)
+ {
++      struct perf_guest_info_callbacks *guest_cbs = perf_get_guest_cbs();
+       int misc = 0;
+ 
+       /* However, NDS32 does not support virtualization */
+-      if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) {
+-              if (perf_guest_cbs->is_user_mode())
++      if (guest_cbs && guest_cbs->is_in_guest()) {
++              if (guest_cbs->is_user_mode())
+                       misc |= PERF_RECORD_MISC_GUEST_USER;
+               else
+                       misc |= PERF_RECORD_MISC_GUEST_KERNEL;
+diff --git a/arch/riscv/kernel/perf_callchain.c b/arch/riscv/kernel/perf_callchain.c
+index 0bb1854dce833..8ecfc4c128bc5 100644
+--- a/arch/riscv/kernel/perf_callchain.c
++++ b/arch/riscv/kernel/perf_callchain.c
+@@ -56,10 +56,11 @@ static unsigned long user_backtrace(struct perf_callchain_entry_ctx *entry,
+ void perf_callchain_user(struct perf_callchain_entry_ctx *entry,
+                        struct pt_regs *regs)
+ {
++      struct perf_guest_info_callbacks *guest_cbs = perf_get_guest_cbs();
+       unsigned long fp = 0;
+ 
+       /* RISC-V does not support perf in guest mode. */
+-      if (perf_guest_cbs && perf_guest_cbs->is_in_guest())
++      if (guest_cbs && guest_cbs->is_in_guest())
+               return;
+ 
+       fp = regs->s0;
+@@ -78,8 +79,10 @@ static bool fill_callchain(void *entry, unsigned long pc)
+ void perf_callchain_kernel(struct perf_callchain_entry_ctx *entry,
+                          struct pt_regs *regs)
+ {
++      struct perf_guest_info_callbacks *guest_cbs = perf_get_guest_cbs();
++
+       /* RISC-V does not support perf in guest mode. */
+-      if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) {
++      if (guest_cbs && guest_cbs->is_in_guest()) {
+               pr_warn("RISC-V does not support perf in guest mode!");
+               return;
+       }
+diff --git a/arch/s390/kvm/interrupt.c b/arch/s390/kvm/interrupt.c
+index c3bd993fdd0cf..0576d5c991384 100644
+--- a/arch/s390/kvm/interrupt.c
++++ b/arch/s390/kvm/interrupt.c
+@@ -2115,6 +2115,13 @@ int kvm_s390_is_stop_irq_pending(struct kvm_vcpu *vcpu)
+       return test_bit(IRQ_PEND_SIGP_STOP, &li->pending_irqs);
+ }
+ 
++int kvm_s390_is_restart_irq_pending(struct kvm_vcpu *vcpu)
++{
++      struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
++
++      return test_bit(IRQ_PEND_RESTART, &li->pending_irqs);
++}
++
+ void kvm_s390_clear_stop_irq(struct kvm_vcpu *vcpu)
+ {
+       struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
+diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
+index 14a18ba5ff2c8..ef299aad40090 100644
+--- a/arch/s390/kvm/kvm-s390.c
++++ b/arch/s390/kvm/kvm-s390.c
+@@ -4645,10 +4645,15 @@ int kvm_s390_vcpu_stop(struct kvm_vcpu *vcpu)
+               }
+       }
+ 
+-      /* SIGP STOP and SIGP STOP AND STORE STATUS has been fully processed */
++      /*
++       * Set the VCPU to STOPPED and THEN clear the interrupt flag,
++       * now that the SIGP STOP and SIGP STOP AND STORE STATUS orders
++       * have been fully processed. This will ensure that the VCPU
++       * is kept BUSY if another VCPU is inquiring with SIGP SENSE.
++       */
++      kvm_s390_set_cpuflags(vcpu, CPUSTAT_STOPPED);
+       kvm_s390_clear_stop_irq(vcpu);
+ 
+-      kvm_s390_set_cpuflags(vcpu, CPUSTAT_STOPPED);
+       __disable_ibs_on_vcpu(vcpu);
+ 
+       for (i = 0; i < online_vcpus; i++) {
+diff --git a/arch/s390/kvm/kvm-s390.h b/arch/s390/kvm/kvm-s390.h
+index c07a050d757d3..1876ab0c293fe 100644
+--- a/arch/s390/kvm/kvm-s390.h
++++ b/arch/s390/kvm/kvm-s390.h
+@@ -427,6 +427,7 @@ void kvm_s390_destroy_adapters(struct kvm *kvm);
+ int kvm_s390_ext_call_pending(struct kvm_vcpu *vcpu);
+ extern struct kvm_device_ops kvm_flic_ops;
+ int kvm_s390_is_stop_irq_pending(struct kvm_vcpu *vcpu);
++int kvm_s390_is_restart_irq_pending(struct kvm_vcpu *vcpu);
+ void kvm_s390_clear_stop_irq(struct kvm_vcpu *vcpu);
+ int kvm_s390_set_irq_state(struct kvm_vcpu *vcpu,
+                          void __user *buf, int len);
+diff --git a/arch/s390/kvm/sigp.c b/arch/s390/kvm/sigp.c
+index cf4de80bd5410..8aaee2892ec35 100644
+--- a/arch/s390/kvm/sigp.c
++++ b/arch/s390/kvm/sigp.c
+@@ -276,6 +276,34 @@ static int handle_sigp_dst(struct kvm_vcpu *vcpu, u8 order_code,
+       if (!dst_vcpu)
+               return SIGP_CC_NOT_OPERATIONAL;
+ 
++      /*
++       * SIGP RESTART, SIGP STOP, and SIGP STOP AND STORE STATUS orders
++       * are processed asynchronously. Until the affected VCPU finishes
++       * its work and calls back into KVM to clear the (RESTART or STOP)
++       * interrupt, we need to return any new non-reset orders "busy".
++       *
++       * This is important because a single VCPU could issue:
++       *  1) SIGP STOP $DESTINATION
++       *  2) SIGP SENSE $DESTINATION
++       *
++       * If the SIGP SENSE would not be rejected as "busy", it could
++       * return an incorrect answer as to whether the VCPU is STOPPED
++       * or OPERATING.
++       */
++      if (order_code != SIGP_INITIAL_CPU_RESET &&
++          order_code != SIGP_CPU_RESET) {
++              /*
++               * Lockless check. Both SIGP STOP and SIGP (RE)START
++               * properly synchronize everything while processing
++               * their orders, while the guest cannot observe a
++               * difference when issuing other orders from two
++               * different VCPUs.
++               */
++              if (kvm_s390_is_stop_irq_pending(dst_vcpu) ||
++                  kvm_s390_is_restart_irq_pending(dst_vcpu))
++                      return SIGP_CC_BUSY;
++      }
++
+       switch (order_code) {
+       case SIGP_SENSE:
+               vcpu->stat.instruction_sigp_sense++;
+diff --git a/arch/x86/events/core.c b/arch/x86/events/core.c
+index 38b2c779146f1..32cec290d3ad6 100644
+--- a/arch/x86/events/core.c
++++ b/arch/x86/events/core.c
+@@ -2768,10 +2768,11 @@ static bool perf_hw_regs(struct pt_regs *regs)
+ void
+ perf_callchain_kernel(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs)
+ {
++      struct perf_guest_info_callbacks *guest_cbs = perf_get_guest_cbs();
+       struct unwind_state state;
+       unsigned long addr;
+ 
+-      if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) {
++      if (guest_cbs && guest_cbs->is_in_guest()) {
+               /* TODO: We don't support guest os callchain now */
+               return;
+       }
+@@ -2871,10 +2872,11 @@ perf_callchain_user32(struct pt_regs *regs, struct perf_callchain_entry_ctx *ent
+ void
+ perf_callchain_user(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs)
+ {
++      struct perf_guest_info_callbacks *guest_cbs = perf_get_guest_cbs();
+       struct stack_frame frame;
+       const struct stack_frame __user *fp;
+ 
+-      if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) {
++      if (guest_cbs && guest_cbs->is_in_guest()) {
+               /* TODO: We don't support guest os callchain now */
+               return;
+       }
+@@ -2951,18 +2953,21 @@ static unsigned long code_segment_base(struct pt_regs *regs)
+ 
+ unsigned long perf_instruction_pointer(struct pt_regs *regs)
+ {
+-      if (perf_guest_cbs && perf_guest_cbs->is_in_guest())
+-              return perf_guest_cbs->get_guest_ip();
++      struct perf_guest_info_callbacks *guest_cbs = perf_get_guest_cbs();
++
++      if (guest_cbs && guest_cbs->is_in_guest())
++              return guest_cbs->get_guest_ip();
+ 
+       return regs->ip + code_segment_base(regs);
+ }
+ 
+ unsigned long perf_misc_flags(struct pt_regs *regs)
+ {
++      struct perf_guest_info_callbacks *guest_cbs = perf_get_guest_cbs();
+       int misc = 0;
+ 
+-      if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) {
+-              if (perf_guest_cbs->is_user_mode())
++      if (guest_cbs && guest_cbs->is_in_guest()) {
++              if (guest_cbs->is_user_mode())
+                       misc |= PERF_RECORD_MISC_GUEST_USER;
+               else
+                       misc |= PERF_RECORD_MISC_GUEST_KERNEL;
+diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c
+index ec6444f2c9dcb..1e33c75ffa260 100644
+--- a/arch/x86/events/intel/core.c
++++ b/arch/x86/events/intel/core.c
+@@ -2835,6 +2835,7 @@ static int handle_pmi_common(struct pt_regs *regs, u64 status)
+ {
+       struct perf_sample_data data;
+       struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
++      struct perf_guest_info_callbacks *guest_cbs;
+       int bit;
+       int handled = 0;
+       u64 intel_ctrl = hybrid(cpuc->pmu, intel_ctrl);
+@@ -2901,9 +2902,11 @@ static int handle_pmi_common(struct pt_regs *regs, u64 status)
+        */
+       if (__test_and_clear_bit(GLOBAL_STATUS_TRACE_TOPAPMI_BIT, (unsigned long *)&status)) {
+               handled++;
+-              if (unlikely(perf_guest_cbs && perf_guest_cbs->is_in_guest() &&
+-                      perf_guest_cbs->handle_intel_pt_intr))
+-                      perf_guest_cbs->handle_intel_pt_intr();
++
++              guest_cbs = perf_get_guest_cbs();
++              if (unlikely(guest_cbs && guest_cbs->is_in_guest() &&
++                           guest_cbs->handle_intel_pt_intr))
++                      guest_cbs->handle_intel_pt_intr();
+               else
+                       intel_pt_interrupt();
+       }
+diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
+index 555f4de47ef29..59fc339ba5282 100644
+--- a/arch/x86/include/asm/kvm_host.h
++++ b/arch/x86/include/asm/kvm_host.h
+@@ -1519,6 +1519,7 @@ struct kvm_x86_init_ops {
+       int (*disabled_by_bios)(void);
+       int (*check_processor_compatibility)(void);
+       int (*hardware_setup)(void);
++      bool (*intel_pt_intr_in_guest)(void);
+ 
+       struct kvm_x86_ops *runtime_ops;
+ };
+diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
+index f206fc35deff6..7c009867d6f23 100644
+--- a/arch/x86/kvm/lapic.c
++++ b/arch/x86/kvm/lapic.c
+@@ -676,31 +676,25 @@ static inline bool pv_eoi_enabled(struct kvm_vcpu *vcpu)
+ static bool pv_eoi_get_pending(struct kvm_vcpu *vcpu)
+ {
+       u8 val;
+-      if (pv_eoi_get_user(vcpu, &val) < 0) {
+-              printk(KERN_WARNING "Can't read EOI MSR value: 0x%llx\n",
+-                         (unsigned long long)vcpu->arch.pv_eoi.msr_val);
++      if (pv_eoi_get_user(vcpu, &val) < 0)
+               return false;
+-      }
++
+       return val & KVM_PV_EOI_ENABLED;
+ }
+ 
+ static void pv_eoi_set_pending(struct kvm_vcpu *vcpu)
+ {
+-      if (pv_eoi_put_user(vcpu, KVM_PV_EOI_ENABLED) < 0) {
+-              printk(KERN_WARNING "Can't set EOI MSR value: 0x%llx\n",
+-                         (unsigned long long)vcpu->arch.pv_eoi.msr_val);
++      if (pv_eoi_put_user(vcpu, KVM_PV_EOI_ENABLED) < 0)
+               return;
+-      }
++
+       __set_bit(KVM_APIC_PV_EOI_PENDING, &vcpu->arch.apic_attention);
+ }
+ 
+ static void pv_eoi_clr_pending(struct kvm_vcpu *vcpu)
+ {
+-      if (pv_eoi_put_user(vcpu, KVM_PV_EOI_DISABLED) < 0) {
+-              printk(KERN_WARNING "Can't clear EOI MSR value: 0x%llx\n",
+-                         (unsigned long long)vcpu->arch.pv_eoi.msr_val);
++      if (pv_eoi_put_user(vcpu, KVM_PV_EOI_DISABLED) < 0)
+               return;
+-      }
++
+       __clear_bit(KVM_APIC_PV_EOI_PENDING, &vcpu->arch.apic_attention);
+ }
+ 
+diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
+index 0dbf94eb954fd..7f4e6f625abcf 100644
+--- a/arch/x86/kvm/vmx/vmx.c
++++ b/arch/x86/kvm/vmx/vmx.c
+@@ -7905,6 +7905,7 @@ static struct kvm_x86_init_ops vmx_init_ops __initdata = {
+       .disabled_by_bios = vmx_disabled_by_bios,
+       .check_processor_compatibility = vmx_check_processor_compat,
+       .hardware_setup = hardware_setup,
++      .intel_pt_intr_in_guest = vmx_pt_mode_is_host_guest,
+ 
+       .runtime_ops = &vmx_x86_ops,
+ };
+diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
+index e50e97ac44084..0b5c61bb24a17 100644
+--- a/arch/x86/kvm/x86.c
++++ b/arch/x86/kvm/x86.c
+@@ -8560,7 +8560,7 @@ static struct perf_guest_info_callbacks kvm_guest_cbs = {
+       .is_in_guest            = kvm_is_in_guest,
+       .is_user_mode           = kvm_is_user_mode,
+       .get_guest_ip           = kvm_get_guest_ip,
+-      .handle_intel_pt_intr   = kvm_handle_intel_pt_intr,
++      .handle_intel_pt_intr   = NULL,
+ };
+ 
+ #ifdef CONFIG_X86_64
+@@ -8676,8 +8676,6 @@ int kvm_arch_init(void *opaque)
+ 
+       kvm_timer_init();
+ 
+-      perf_register_guest_info_callbacks(&kvm_guest_cbs);
+-
+       if (boot_cpu_has(X86_FEATURE_XSAVE)) {
+               host_xcr0 = xgetbv(XCR_XFEATURE_ENABLED_MASK);
+               supported_xcr0 = host_xcr0 & KVM_SUPPORTED_XCR0;
+@@ -8709,7 +8707,6 @@ void kvm_arch_exit(void)
+               clear_hv_tscchange_cb();
+ #endif
+       kvm_lapic_exit();
+-      perf_unregister_guest_info_callbacks(&kvm_guest_cbs);
+ 
+       if (!boot_cpu_has(X86_FEATURE_CONSTANT_TSC))
+               cpufreq_unregister_notifier(&kvmclock_cpufreq_notifier_block,
+@@ -11269,6 +11266,10 @@ int kvm_arch_hardware_setup(void *opaque)
+       memcpy(&kvm_x86_ops, ops->runtime_ops, sizeof(kvm_x86_ops));
+       kvm_ops_static_call_update();
+ 
++      if (ops->intel_pt_intr_in_guest && ops->intel_pt_intr_in_guest())
++              kvm_guest_cbs.handle_intel_pt_intr = kvm_handle_intel_pt_intr;
++      perf_register_guest_info_callbacks(&kvm_guest_cbs);
++
+       if (!kvm_cpu_cap_has(X86_FEATURE_XSAVES))
+               supported_xss = 0;
+ 
+@@ -11296,6 +11297,9 @@ int kvm_arch_hardware_setup(void *opaque)
+ 
+ void kvm_arch_hardware_unsetup(void)
+ {
++      perf_unregister_guest_info_callbacks(&kvm_guest_cbs);
++      kvm_guest_cbs.handle_intel_pt_intr = NULL;
++
+       static_call(kvm_x86_hardware_unsetup)();
+ }
+ 
+diff --git a/drivers/base/devtmpfs.c b/drivers/base/devtmpfs.c
+index 8be352ab4ddbf..fa13ad49d2116 100644
+--- a/drivers/base/devtmpfs.c
++++ b/drivers/base/devtmpfs.c
+@@ -59,8 +59,15 @@ static struct dentry *public_dev_mount(struct file_system_type *fs_type, int fla
+                     const char *dev_name, void *data)
+ {
+       struct super_block *s = mnt->mnt_sb;
++      int err;
++
+       atomic_inc(&s->s_active);
+       down_write(&s->s_umount);
++      err = reconfigure_single(s, flags, data);
++      if (err < 0) {
++              deactivate_locked_super(s);
++              return ERR_PTR(err);
++      }
+       return dget(s->s_root);
+ }
+ 
+diff --git a/drivers/firmware/qemu_fw_cfg.c b/drivers/firmware/qemu_fw_cfg.c
+index 172c751a4f6c2..f08e056ed0ae4 100644
+--- a/drivers/firmware/qemu_fw_cfg.c
++++ b/drivers/firmware/qemu_fw_cfg.c
+@@ -388,9 +388,7 @@ static void fw_cfg_sysfs_cache_cleanup(void)
+       struct fw_cfg_sysfs_entry *entry, *next;
+ 
+       list_for_each_entry_safe(entry, next, &fw_cfg_entry_cache, list) {
+-              /* will end up invoking fw_cfg_sysfs_cache_delist()
+-               * via each object's release() method (i.e. destructor)
+-               */
++              fw_cfg_sysfs_cache_delist(entry);
+               kobject_put(&entry->kobj);
+       }
+ }
+@@ -448,7 +446,6 @@ static void fw_cfg_sysfs_release_entry(struct kobject *kobj)
+ {
+       struct fw_cfg_sysfs_entry *entry = to_entry(kobj);
+ 
+-      fw_cfg_sysfs_cache_delist(entry);
+       kfree(entry);
+ }
+ 
+@@ -601,20 +598,18 @@ static int fw_cfg_register_file(const struct fw_cfg_file *f)
+       /* set file entry information */
+       entry->size = be32_to_cpu(f->size);
+       entry->select = be16_to_cpu(f->select);
+-      memcpy(entry->name, f->name, FW_CFG_MAX_FILE_PATH);
++      strscpy(entry->name, f->name, FW_CFG_MAX_FILE_PATH);
+ 
+       /* register entry under "/sys/firmware/qemu_fw_cfg/by_key/" */
+       err = kobject_init_and_add(&entry->kobj, &fw_cfg_sysfs_entry_ktype,
+                                  fw_cfg_sel_ko, "%d", entry->select);
+-      if (err) {
+-              kobject_put(&entry->kobj);
+-              return err;
+-      }
++      if (err)
++              goto err_put_entry;
+ 
+       /* add raw binary content access */
+       err = sysfs_create_bin_file(&entry->kobj, &fw_cfg_sysfs_attr_raw);
+       if (err)
+-              goto err_add_raw;
++              goto err_del_entry;
+ 
+       /* try adding "/sys/firmware/qemu_fw_cfg/by_name/" symlink */
+       fw_cfg_build_symlink(fw_cfg_fname_kset, &entry->kobj, entry->name);
+@@ -623,9 +618,10 @@ static int fw_cfg_register_file(const struct fw_cfg_file *f)
+       fw_cfg_sysfs_cache_enlist(entry);
+       return 0;
+ 
+-err_add_raw:
++err_del_entry:
+       kobject_del(&entry->kobj);
+-      kfree(entry);
++err_put_entry:
++      kobject_put(&entry->kobj);
+       return err;
+ }
+ 
+diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+index e727f1dd2a9a7..05f7ffd6a28da 100644
+--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+@@ -6065,6 +6065,7 @@ static void update_dsc_caps(struct amdgpu_dm_connector *aconnector,
+                                                       struct dsc_dec_dpcd_caps *dsc_caps)
+ {
+       stream->timing.flags.DSC = 0;
++      dsc_caps->is_dsc_supported = false;
+ 
+       if (aconnector->dc_link && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT) {
+               dc_dsc_parse_dsc_dpcd(aconnector->dc_link->ctx->dc,
+diff --git a/drivers/media/usb/uvc/uvc_video.c b/drivers/media/usb/uvc/uvc_video.c
+index 9f37eaf28ce7e..1b4cc934109e8 100644
+--- a/drivers/media/usb/uvc/uvc_video.c
++++ b/drivers/media/usb/uvc/uvc_video.c
+@@ -1963,6 +1963,10 @@ static int uvc_video_start_transfer(struct uvc_streaming *stream,
+               if (ep == NULL)
+                       return -EIO;
+ 
++              /* Reject broken descriptors. */
++              if (usb_endpoint_maxp(&ep->desc) == 0)
++                      return -EIO;
++
+               ret = uvc_init_video_bulk(stream, ep, gfp_flags);
+       }
+ 
+diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/hw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/hw.c
+index 6312fddd9c00a..eaba661133280 100644
+--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/hw.c
++++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/hw.c
+@@ -1000,6 +1000,7 @@ int rtl92cu_hw_init(struct ieee80211_hw *hw)
+       _initpabias(hw);
+       rtl92c_dm_init(hw);
+ exit:
++      local_irq_disable();
+       local_irq_restore(flags);
+       return err;
+ }
+diff --git a/drivers/remoteproc/qcom_pil_info.c b/drivers/remoteproc/qcom_pil_info.c
+index 7c007dd7b2000..aca21560e20b8 100644
+--- a/drivers/remoteproc/qcom_pil_info.c
++++ b/drivers/remoteproc/qcom_pil_info.c
+@@ -104,7 +104,7 @@ int qcom_pil_info_store(const char *image, phys_addr_t base, size_t size)
+       return -ENOMEM;
+ 
+ found_unused:
+-      memcpy_toio(entry, image, PIL_RELOC_NAME_LEN);
++      memcpy_toio(entry, image, strnlen(image, PIL_RELOC_NAME_LEN));
+ found_existing:
+       /* Use two writel() as base is only aligned to 4 bytes on odd entries */
+       writel(base, entry + PIL_RELOC_NAME_LEN);
+diff --git a/drivers/remoteproc/qcom_q6v5_pas.c b/drivers/remoteproc/qcom_q6v5_pas.c
+index 03857dc9cdc12..120c16b14223b 100644
+--- a/drivers/remoteproc/qcom_q6v5_pas.c
++++ b/drivers/remoteproc/qcom_q6v5_pas.c
+@@ -652,6 +652,7 @@ static const struct adsp_data sm8350_cdsp_resource = {
+       .auto_boot = true,
+       .proxy_pd_names = (char*[]){
+               "cx",
++              "mxc",
+               NULL
+       },
+       .load_state = "cdsp",
+diff --git a/drivers/video/fbdev/vga16fb.c b/drivers/video/fbdev/vga16fb.c
+index e2757ff1c23d2..96e312a3eac75 100644
+--- a/drivers/video/fbdev/vga16fb.c
++++ b/drivers/video/fbdev/vga16fb.c
+@@ -184,6 +184,25 @@ static inline void setindex(int index)
+       vga_io_w(VGA_GFX_I, index);
+ }
+ 
++/* Check if the video mode is supported by the driver */
++static inline int check_mode_supported(void)
++{
++      /* non-x86 architectures treat orig_video_isVGA as a boolean flag */
++#if defined(CONFIG_X86)
++      /* only EGA and VGA in 16 color graphic mode are supported */
++      if (screen_info.orig_video_isVGA != VIDEO_TYPE_EGAC &&
++          screen_info.orig_video_isVGA != VIDEO_TYPE_VGAC)
++              return -ENODEV;
++
++      if (screen_info.orig_video_mode != 0x0D &&      /* 320x200/4 (EGA) */
++          screen_info.orig_video_mode != 0x0E &&      /* 640x200/4 (EGA) */
++          screen_info.orig_video_mode != 0x10 &&      /* 640x350/4 (EGA) */
++          screen_info.orig_video_mode != 0x12)        /* 640x480/4 (VGA) */
++              return -ENODEV;
++#endif
++      return 0;
++}
++
+ static void vga16fb_pan_var(struct fb_info *info, 
+                           struct fb_var_screeninfo *var)
+ {
+@@ -1422,6 +1441,11 @@ static int __init vga16fb_init(void)
+ 
+       vga16fb_setup(option);
+ #endif
++
++      ret = check_mode_supported();
++      if (ret)
++              return ret;
++
+       ret = platform_driver_register(&vga16fb_driver);
+ 
+       if (!ret) {
+diff --git a/fs/9p/vfs_addr.c b/fs/9p/vfs_addr.c
+index fac918ccb3051..1d554d0b6e583 100644
+--- a/fs/9p/vfs_addr.c
++++ b/fs/9p/vfs_addr.c
+@@ -42,6 +42,11 @@ static void v9fs_req_issue_op(struct netfs_read_subrequest *subreq)
+       iov_iter_xarray(&to, READ, &rreq->mapping->i_pages, pos, len);
+ 
+       total = p9_client_read(fid, pos, &to, &err);
++
++      /* if we just extended the file size, any portion not in
++       * cache won't be on server and is zeroes */
++      __set_bit(NETFS_SREQ_CLEAR_TAIL, &subreq->flags);
++
+       netfs_subreq_terminated(subreq, err ?: total, false);
+ }
+ 
+diff --git a/fs/9p/vfs_inode_dotl.c b/fs/9p/vfs_inode_dotl.c
+index 7dee89ba32e7b..52f8ae79db219 100644
+--- a/fs/9p/vfs_inode_dotl.c
++++ b/fs/9p/vfs_inode_dotl.c
+@@ -551,7 +551,10 @@ int v9fs_vfs_setattr_dotl(struct user_namespace *mnt_userns,
+ {
+       int retval, use_dentry = 0;
+       struct p9_fid *fid = NULL;
+-      struct p9_iattr_dotl p9attr;
++      struct p9_iattr_dotl p9attr = {
++              .uid = INVALID_UID,
++              .gid = INVALID_GID,
++      };
+       struct inode *inode = d_inode(dentry);
+ 
+       p9_debug(P9_DEBUG_VFS, "\n");
+@@ -561,14 +564,22 @@ int v9fs_vfs_setattr_dotl(struct user_namespace *mnt_userns,
+               return retval;
+ 
+       p9attr.valid = v9fs_mapped_iattr_valid(iattr->ia_valid);
+-      p9attr.mode = iattr->ia_mode;
+-      p9attr.uid = iattr->ia_uid;
+-      p9attr.gid = iattr->ia_gid;
+-      p9attr.size = iattr->ia_size;
+-      p9attr.atime_sec = iattr->ia_atime.tv_sec;
+-      p9attr.atime_nsec = iattr->ia_atime.tv_nsec;
+-      p9attr.mtime_sec = iattr->ia_mtime.tv_sec;
+-      p9attr.mtime_nsec = iattr->ia_mtime.tv_nsec;
++      if (iattr->ia_valid & ATTR_MODE)
++              p9attr.mode = iattr->ia_mode;
++      if (iattr->ia_valid & ATTR_UID)
++              p9attr.uid = iattr->ia_uid;
++      if (iattr->ia_valid & ATTR_GID)
++              p9attr.gid = iattr->ia_gid;
++      if (iattr->ia_valid & ATTR_SIZE)
++              p9attr.size = iattr->ia_size;
++      if (iattr->ia_valid & ATTR_ATIME_SET) {
++              p9attr.atime_sec = iattr->ia_atime.tv_sec;
++              p9attr.atime_nsec = iattr->ia_atime.tv_nsec;
++      }
++      if (iattr->ia_valid & ATTR_MTIME_SET) {
++              p9attr.mtime_sec = iattr->ia_mtime.tv_sec;
++              p9attr.mtime_nsec = iattr->ia_mtime.tv_nsec;
++      }
+ 
+       if (iattr->ia_valid & ATTR_FILE) {
+               fid = iattr->ia_file->private_data;
+diff --git a/fs/fs_context.c b/fs/fs_context.c
+index b7e43a780a625..24ce12f0db32e 100644
+--- a/fs/fs_context.c
++++ b/fs/fs_context.c
+@@ -548,7 +548,7 @@ static int legacy_parse_param(struct fs_context *fc, struct fs_parameter *param)
+                             param->key);
+       }
+ 
+-      if (len > PAGE_SIZE - 2 - size)
++      if (size + len + 2 > PAGE_SIZE)
+               return invalf(fc, "VFS: Legacy: Cumulative options too large");
+       if (strchr(param->key, ',') ||
+           (param->type == fs_value_is_string &&
+diff --git a/fs/nfsd/nfs3proc.c b/fs/nfsd/nfs3proc.c
+index 15dac36ca852e..8ef53f6726ec8 100644
+--- a/fs/nfsd/nfs3proc.c
++++ b/fs/nfsd/nfs3proc.c
+@@ -202,15 +202,11 @@ nfsd3_proc_write(struct svc_rqst *rqstp)
+       fh_copy(&resp->fh, &argp->fh);
+       resp->committed = argp->stable;
+       nvecs = svc_fill_write_vector(rqstp, &argp->payload);
+-      if (!nvecs) {
+-              resp->status = nfserr_io;
+-              goto out;
+-      }
++
+       resp->status = nfsd_write(rqstp, &resp->fh, argp->offset,
+                                 rqstp->rq_vec, nvecs, &cnt,
+                                 resp->committed, resp->verf);
+       resp->count = cnt;
+-out:
+       return rpc_success;
+ }
+ 
+diff --git a/fs/nfsd/nfsproc.c b/fs/nfsd/nfsproc.c
+index de282f3273c50..312fd289be583 100644
+--- a/fs/nfsd/nfsproc.c
++++ b/fs/nfsd/nfsproc.c
+@@ -235,10 +235,6 @@ nfsd_proc_write(struct svc_rqst *rqstp)
+               argp->len, argp->offset);
+ 
+       nvecs = svc_fill_write_vector(rqstp, &argp->payload);
+-      if (!nvecs) {
+-              resp->status = nfserr_io;
+-              goto out;
+-      }
+ 
+       resp->status = nfsd_write(rqstp, fh_copy(&resp->fh, &argp->fh),
+                                 argp->offset, rqstp->rq_vec, nvecs,
+@@ -247,7 +243,6 @@ nfsd_proc_write(struct svc_rqst *rqstp)
+               resp->status = fh_getattr(&resp->fh, &resp->stat);
+       else if (resp->status == nfserr_jukebox)
+               return rpc_drop_reply;
+-out:
+       return rpc_success;
+ }
+ 
+diff --git a/fs/orangefs/orangefs-bufmap.c b/fs/orangefs/orangefs-bufmap.c
+index 538e839590ef5..b501dc07f9222 100644
+--- a/fs/orangefs/orangefs-bufmap.c
++++ b/fs/orangefs/orangefs-bufmap.c
+@@ -176,7 +176,7 @@ orangefs_bufmap_free(struct orangefs_bufmap *bufmap)
+ {
+       kfree(bufmap->page_array);
+       kfree(bufmap->desc_array);
+-      kfree(bufmap->buffer_index_array);
++      bitmap_free(bufmap->buffer_index_array);
+       kfree(bufmap);
+ }
+ 
+@@ -226,8 +226,7 @@ orangefs_bufmap_alloc(struct ORANGEFS_dev_map_desc *user_desc)
+       bufmap->desc_size = user_desc->size;
+       bufmap->desc_shift = ilog2(bufmap->desc_size);
+ 
+-      bufmap->buffer_index_array =
+-              kzalloc(DIV_ROUND_UP(bufmap->desc_count, BITS_PER_LONG), GFP_KERNEL);
++      bufmap->buffer_index_array = bitmap_zalloc(bufmap->desc_count, GFP_KERNEL);
+       if (!bufmap->buffer_index_array)
+               goto out_free_bufmap;
+ 
+@@ -250,7 +249,7 @@ orangefs_bufmap_alloc(struct ORANGEFS_dev_map_desc *user_desc)
+ out_free_desc_array:
+       kfree(bufmap->desc_array);
+ out_free_index_array:
+-      kfree(bufmap->buffer_index_array);
++      bitmap_free(bufmap->buffer_index_array);
+ out_free_bufmap:
+       kfree(bufmap);
+ out:
+diff --git a/fs/super.c b/fs/super.c
+index 3bfc0f8fbd5bc..a6405d44d4ca2 100644
+--- a/fs/super.c
++++ b/fs/super.c
+@@ -1423,8 +1423,8 @@ struct dentry *mount_nodev(struct file_system_type *fs_type,
+ }
+ EXPORT_SYMBOL(mount_nodev);
+ 
+-static int reconfigure_single(struct super_block *s,
+-                            int flags, void *data)
++int reconfigure_single(struct super_block *s,
++                     int flags, void *data)
+ {
+       struct fs_context *fc;
+       int ret;
+diff --git a/include/linux/fs_context.h b/include/linux/fs_context.h
+index 6b54982fc5f37..13fa6f3df8e46 100644
+--- a/include/linux/fs_context.h
++++ b/include/linux/fs_context.h
+@@ -142,6 +142,8 @@ extern void put_fs_context(struct fs_context *fc);
+ extern int vfs_parse_fs_param_source(struct fs_context *fc,
+                                    struct fs_parameter *param);
+ extern void fc_drop_locked(struct fs_context *fc);
++int reconfigure_single(struct super_block *s,
++                     int flags, void *data);
+ 
+ /*
+  * sget() wrappers to be called from the ->get_tree() op.
+diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
+index 0dcfd265beed5..318c489b735bc 100644
+--- a/include/linux/perf_event.h
++++ b/include/linux/perf_event.h
+@@ -1240,7 +1240,18 @@ extern void perf_event_bpf_event(struct bpf_prog *prog,
+                                enum perf_bpf_event_type type,
+                                u16 flags);
+ 
+-extern struct perf_guest_info_callbacks *perf_guest_cbs;
++extern struct perf_guest_info_callbacks __rcu *perf_guest_cbs;
++static inline struct perf_guest_info_callbacks *perf_get_guest_cbs(void)
++{
++      /*
++       * Callbacks are RCU-protected and must be READ_ONCE to avoid reloading
++       * the callbacks between a !NULL check and dereferences, to ensure
++       * pending stores/changes to the callback pointers are visible before a
++       * non-NULL perf_guest_cbs is visible to readers, and to prevent a
++       * module from unloading callbacks while readers are active.
++       */
++      return rcu_dereference(perf_guest_cbs);
++}
+ extern int perf_register_guest_info_callbacks(struct perf_guest_info_callbacks *callbacks);
+ extern int perf_unregister_guest_info_callbacks(struct perf_guest_info_callbacks *callbacks);
+ 
+diff --git a/kernel/events/core.c b/kernel/events/core.c
+index 30d94f68c5bdb..63f0414666438 100644
+--- a/kernel/events/core.c
++++ b/kernel/events/core.c
+@@ -6526,18 +6526,25 @@ static void perf_pending_event(struct irq_work *entry)
+  * Later on, we might change it to a list if there is
+  * another virtualization implementation supporting the callbacks.
+  */
+-struct perf_guest_info_callbacks *perf_guest_cbs;
++struct perf_guest_info_callbacks __rcu *perf_guest_cbs;
+ 
+ int perf_register_guest_info_callbacks(struct perf_guest_info_callbacks *cbs)
+ {
+-      perf_guest_cbs = cbs;
++      if (WARN_ON_ONCE(rcu_access_pointer(perf_guest_cbs)))
++              return -EBUSY;
++
++      rcu_assign_pointer(perf_guest_cbs, cbs);
+       return 0;
+ }
+ EXPORT_SYMBOL_GPL(perf_register_guest_info_callbacks);
+ 
+ int perf_unregister_guest_info_callbacks(struct perf_guest_info_callbacks *cbs)
+ {
+-      perf_guest_cbs = NULL;
++      if (WARN_ON_ONCE(rcu_access_pointer(perf_guest_cbs) != cbs))
++              return -EINVAL;
++
++      rcu_assign_pointer(perf_guest_cbs, NULL);
++      synchronize_rcu();
+       return 0;
+ }
+ EXPORT_SYMBOL_GPL(perf_unregister_guest_info_callbacks);
+diff --git a/sound/pci/hda/hda_tegra.c b/sound/pci/hda/hda_tegra.c
+index ea700395bef40..773f4903550a0 100644
+--- a/sound/pci/hda/hda_tegra.c
++++ b/sound/pci/hda/hda_tegra.c
+@@ -68,14 +68,20 @@
+  */
+ #define TEGRA194_NUM_SDO_LINES          4
+ 
++struct hda_tegra_soc {
++      bool has_hda2codec_2x_reset;
++};
++
+ struct hda_tegra {
+       struct azx chip;
+       struct device *dev;
+-      struct reset_control *reset;
++      struct reset_control_bulk_data resets[3];
+       struct clk_bulk_data clocks[3];
++      unsigned int nresets;
+       unsigned int nclocks;
+       void __iomem *regs;
+       struct work_struct probe_work;
++      const struct hda_tegra_soc *soc;
+ };
+ 
+ #ifdef CONFIG_PM
+@@ -170,7 +176,7 @@ static int __maybe_unused hda_tegra_runtime_resume(struct device *dev)
+       int rc;
+ 
+       if (!chip->running) {
+-              rc = reset_control_assert(hda->reset);
++              rc = reset_control_bulk_assert(hda->nresets, hda->resets);
+               if (rc)
+                       return rc;
+       }
+@@ -187,7 +193,7 @@ static int __maybe_unused hda_tegra_runtime_resume(struct device *dev)
+       } else {
+               usleep_range(10, 100);
+ 
+-              rc = reset_control_deassert(hda->reset);
++              rc = reset_control_bulk_deassert(hda->nresets, hda->resets);
+               if (rc)
+                       return rc;
+       }
+@@ -427,9 +433,17 @@ static int hda_tegra_create(struct snd_card *card,
+       return 0;
+ }
+ 
++static const struct hda_tegra_soc tegra30_data = {
++      .has_hda2codec_2x_reset = true,
++};
++
++static const struct hda_tegra_soc tegra194_data = {
++      .has_hda2codec_2x_reset = false,
++};
++
+ static const struct of_device_id hda_tegra_match[] = {
+-      { .compatible = "nvidia,tegra30-hda" },
+-      { .compatible = "nvidia,tegra194-hda" },
++      { .compatible = "nvidia,tegra30-hda", .data = &tegra30_data },
++      { .compatible = "nvidia,tegra194-hda", .data = &tegra194_data },
+       {},
+ };
+ MODULE_DEVICE_TABLE(of, hda_tegra_match);
+@@ -449,6 +463,8 @@ static int hda_tegra_probe(struct platform_device *pdev)
+       hda->dev = &pdev->dev;
+       chip = &hda->chip;
+ 
++      hda->soc = of_device_get_match_data(&pdev->dev);
++
+       err = snd_card_new(&pdev->dev, SNDRV_DEFAULT_IDX1, SNDRV_DEFAULT_STR1,
+                          THIS_MODULE, 0, &card);
+       if (err < 0) {
+@@ -456,11 +472,20 @@ static int hda_tegra_probe(struct platform_device *pdev)
+               return err;
+       }
+ 
+-      hda->reset = devm_reset_control_array_get_exclusive(&pdev->dev);
+-      if (IS_ERR(hda->reset)) {
+-              err = PTR_ERR(hda->reset);
++      hda->resets[hda->nresets++].id = "hda";
++      hda->resets[hda->nresets++].id = "hda2hdmi";
++      /*
++       * "hda2codec_2x" reset is not present on Tegra194. Though DT would
++       * be updated to reflect this, but to have backward compatibility
++       * below is necessary.
++       */
++      if (hda->soc->has_hda2codec_2x_reset)
++              hda->resets[hda->nresets++].id = "hda2codec_2x";
++
++      err = devm_reset_control_bulk_get_exclusive(&pdev->dev, hda->nresets,
++                                                  hda->resets);
++      if (err)
+               goto out_free;
+-      }
+ 
+       hda->clocks[hda->nclocks++].id = "hda";
+       hda->clocks[hda->nclocks++].id = "hda2hdmi";
+diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
+index 28255e752c4a1..fa80a79e9f966 100644
+--- a/sound/pci/hda/patch_realtek.c
++++ b/sound/pci/hda/patch_realtek.c
+@@ -1924,6 +1924,7 @@ enum {
+       ALC887_FIXUP_ASUS_BASS,
+       ALC887_FIXUP_BASS_CHMAP,
+       ALC1220_FIXUP_GB_DUAL_CODECS,
++      ALC1220_FIXUP_GB_X570,
+       ALC1220_FIXUP_CLEVO_P950,
+       ALC1220_FIXUP_CLEVO_PB51ED,
+       ALC1220_FIXUP_CLEVO_PB51ED_PINS,
+@@ -2113,6 +2114,29 @@ static void alc1220_fixup_gb_dual_codecs(struct hda_codec *codec,
+       }
+ }
+ 
++static void alc1220_fixup_gb_x570(struct hda_codec *codec,
++                                   const struct hda_fixup *fix,
++                                   int action)
++{
++      static const hda_nid_t conn1[] = { 0x0c };
++      static const struct coef_fw gb_x570_coefs[] = {
++              WRITE_COEF(0x1a, 0x01c1),
++              WRITE_COEF(0x1b, 0x0202),
++              WRITE_COEF(0x43, 0x3005),
++              {}
++      };
++
++      switch (action) {
++      case HDA_FIXUP_ACT_PRE_PROBE:
++              snd_hda_override_conn_list(codec, 0x14, ARRAY_SIZE(conn1), conn1);
++              snd_hda_override_conn_list(codec, 0x1b, ARRAY_SIZE(conn1), conn1);
++              break;
++      case HDA_FIXUP_ACT_INIT:
++              alc_process_coef_fw(codec, gb_x570_coefs);
++              break;
++      }
++}
++
+ static void alc1220_fixup_clevo_p950(struct hda_codec *codec,
+                                    const struct hda_fixup *fix,
+                                    int action)
+@@ -2415,6 +2439,10 @@ static const struct hda_fixup alc882_fixups[] = {
+               .type = HDA_FIXUP_FUNC,
+               .v.func = alc1220_fixup_gb_dual_codecs,
+       },
++      [ALC1220_FIXUP_GB_X570] = {
++              .type = HDA_FIXUP_FUNC,
++              .v.func = alc1220_fixup_gb_x570,
++      },
+       [ALC1220_FIXUP_CLEVO_P950] = {
+               .type = HDA_FIXUP_FUNC,
+               .v.func = alc1220_fixup_clevo_p950,
+@@ -2517,7 +2545,7 @@ static const struct snd_pci_quirk alc882_fixup_tbl[] = {
+       SND_PCI_QUIRK(0x13fe, 0x1009, "Advantech MIT-W101", ALC886_FIXUP_EAPD),
+       SND_PCI_QUIRK(0x1458, 0xa002, "Gigabyte EP45-DS3/Z87X-UD3H", ALC889_FIXUP_FRONT_HP_NO_PRESENCE),
+       SND_PCI_QUIRK(0x1458, 0xa0b8, "Gigabyte AZ370-Gaming", ALC1220_FIXUP_GB_DUAL_CODECS),
+-      SND_PCI_QUIRK(0x1458, 0xa0cd, "Gigabyte X570 Aorus Master", ALC1220_FIXUP_CLEVO_P950),
++      SND_PCI_QUIRK(0x1458, 0xa0cd, "Gigabyte X570 Aorus Master", ALC1220_FIXUP_GB_X570),
+       SND_PCI_QUIRK(0x1458, 0xa0ce, "Gigabyte X570 Aorus Xtreme", ALC1220_FIXUP_CLEVO_P950),
+       SND_PCI_QUIRK(0x1462, 0x11f7, "MSI-GE63", ALC1220_FIXUP_CLEVO_P950),
+       SND_PCI_QUIRK(0x1462, 0x1228, "MSI-GP63", ALC1220_FIXUP_CLEVO_P950),
+@@ -6784,6 +6812,8 @@ enum {
+       ALC256_FIXUP_SYSTEM76_MIC_NO_PRESENCE,
+       ALC233_FIXUP_NO_AUDIO_JACK,
+       ALC256_FIXUP_MIC_NO_PRESENCE_AND_RESUME,
++      ALC285_FIXUP_LEGION_Y9000X_SPEAKERS,
++      ALC285_FIXUP_LEGION_Y9000X_AUTOMUTE,
+ };
+ 
+ static const struct hda_fixup alc269_fixups[] = {
+@@ -8380,6 +8410,18 @@ static const struct hda_fixup alc269_fixups[] = {
+               .chained = true,
+               .chain_id = ALC236_FIXUP_HP_MUTE_LED_MICMUTE_VREF,
+       },
++      [ALC285_FIXUP_LEGION_Y9000X_SPEAKERS] = {
++              .type = HDA_FIXUP_FUNC,
++              .v.func = alc285_fixup_ideapad_s740_coef,
++              .chained = true,
++              .chain_id = ALC285_FIXUP_LEGION_Y9000X_AUTOMUTE,
++      },
++      [ALC285_FIXUP_LEGION_Y9000X_AUTOMUTE] = {
++              .type = HDA_FIXUP_FUNC,
++              .v.func = alc287_fixup_legion_15imhg05_speakers,
++              .chained = true,
++              .chain_id = ALC269_FIXUP_THINKPAD_ACPI,
++      },
+       [ALC287_FIXUP_LEGION_15IMHG05_SPEAKERS] = {
+               .type = HDA_FIXUP_VERBS,
+               //.v.verbs = legion_15imhg05_coefs,
+@@ -8730,6 +8772,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+       SND_PCI_QUIRK(0x103c, 0x8896, "HP EliteBook 855 G8 Notebook PC", ALC285_FIXUP_HP_MUTE_LED),
+       SND_PCI_QUIRK(0x103c, 0x8898, "HP EliteBook 845 G8 Notebook PC", ALC285_FIXUP_HP_LIMIT_INT_MIC_BOOST),
+       SND_PCI_QUIRK(0x103c, 0x88d0, "HP Pavilion 15-eh1xxx (mainboard 88D0)", ALC287_FIXUP_HP_GPIO_LED),
++      SND_PCI_QUIRK(0x103c, 0x89c3, "HP", ALC285_FIXUP_HP_GPIO_LED),
+       SND_PCI_QUIRK(0x103c, 0x89ca, "HP", ALC236_FIXUP_HP_MUTE_LED_MICMUTE_VREF),
+       SND_PCI_QUIRK(0x1043, 0x103e, "ASUS X540SA", ALC256_FIXUP_ASUS_MIC),
+       SND_PCI_QUIRK(0x1043, 0x103f, "ASUS TX300", ALC282_FIXUP_ASUS_TX300),
+@@ -8921,13 +8964,16 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+       SND_PCI_QUIRK(0x17aa, 0x3176, "ThinkCentre Station", ALC283_FIXUP_HEADSET_MIC),
+       SND_PCI_QUIRK(0x17aa, 0x3178, "ThinkCentre Station", ALC283_FIXUP_HEADSET_MIC),
+       SND_PCI_QUIRK(0x17aa, 0x31af, "ThinkCentre Station", ALC623_FIXUP_LENOVO_THINKSTATION_P340),
++      SND_PCI_QUIRK(0x17aa, 0x3813, "Legion 7i 15IMHG05", ALC287_FIXUP_LEGION_15IMHG05_SPEAKERS),
+       SND_PCI_QUIRK(0x17aa, 0x3818, "Lenovo C940", ALC298_FIXUP_LENOVO_SPK_VOLUME),
++      SND_PCI_QUIRK(0x17aa, 0x3819, "Lenovo 13s Gen2 ITL", ALC287_FIXUP_13S_GEN2_SPEAKERS),
++      SND_PCI_QUIRK(0x17aa, 0x3824, "Legion Y9000X 2020", ALC285_FIXUP_LEGION_Y9000X_SPEAKERS),
+       SND_PCI_QUIRK(0x17aa, 0x3827, "Ideapad S740", ALC285_FIXUP_IDEAPAD_S740_COEF),
++      SND_PCI_QUIRK(0x17aa, 0x3834, "Lenovo IdeaPad Slim 9i 14ITL5", ALC287_FIXUP_YOGA7_14ITL_SPEAKERS),
+       SND_PCI_QUIRK(0x17aa, 0x3843, "Yoga 9i", ALC287_FIXUP_IDEAPAD_BASS_SPK_AMP),
+-      SND_PCI_QUIRK(0x17aa, 0x3813, "Legion 7i 15IMHG05", ALC287_FIXUP_LEGION_15IMHG05_SPEAKERS),
++      SND_PCI_QUIRK(0x17aa, 0x384a, "Lenovo Yoga 7 15ITL5", ALC287_FIXUP_YOGA7_14ITL_SPEAKERS),
+       SND_PCI_QUIRK(0x17aa, 0x3852, "Lenovo Yoga 7 14ITL5", ALC287_FIXUP_YOGA7_14ITL_SPEAKERS),
+       SND_PCI_QUIRK(0x17aa, 0x3853, "Lenovo Yoga 7 15ITL5", ALC287_FIXUP_YOGA7_14ITL_SPEAKERS),
+-      SND_PCI_QUIRK(0x17aa, 0x3819, "Lenovo 13s Gen2 ITL", ALC287_FIXUP_13S_GEN2_SPEAKERS),
+       SND_PCI_QUIRK(0x17aa, 0x3902, "Lenovo E50-80", ALC269_FIXUP_DMIC_THINKPAD_ACPI),
+       SND_PCI_QUIRK(0x17aa, 0x3977, "IdeaPad S210", ALC283_FIXUP_INT_MIC),
+       SND_PCI_QUIRK(0x17aa, 0x3978, "Lenovo B50-70", ALC269_FIXUP_DMIC_THINKPAD_ACPI),
+diff --git a/tools/perf/ui/browsers/annotate.c b/tools/perf/ui/browsers/annotate.c
+index e81c2493efdf9..44ba900828f6c 100644
+--- a/tools/perf/ui/browsers/annotate.c
++++ b/tools/perf/ui/browsers/annotate.c
+@@ -966,6 +966,7 @@ int symbol__tui_annotate(struct map_symbol *ms, struct evsel *evsel,
+               .opts = opts,
+       };
+       int ret = -1, err;
++      int not_annotated = list_empty(&notes->src->source);
+ 
+       if (sym == NULL)
+               return -1;
+@@ -973,13 +974,15 @@ int symbol__tui_annotate(struct map_symbol *ms, struct evsel *evsel,
+       if (ms->map->dso->annotate_warned)
+               return -1;
+ 
+-      err = symbol__annotate2(ms, evsel, opts, &browser.arch);
+-      if (err) {
+-              char msg[BUFSIZ];
+-              ms->map->dso->annotate_warned = true;
+-              symbol__strerror_disassemble(ms, err, msg, sizeof(msg));
+-              ui__error("Couldn't annotate %s:\n%s", sym->name, msg);
+-              goto out_free_offsets;
++      if (not_annotated) {
++              err = symbol__annotate2(ms, evsel, opts, &browser.arch);
++              if (err) {
++                      char msg[BUFSIZ];
++                      ms->map->dso->annotate_warned = true;
++                      symbol__strerror_disassemble(ms, err, msg, sizeof(msg));
++                      ui__error("Couldn't annotate %s:\n%s", sym->name, msg);
++                      goto out_free_offsets;
++              }
+       }
+ 
+       ui_helpline__push("Press ESC to exit");
+@@ -994,9 +997,11 @@ int symbol__tui_annotate(struct map_symbol *ms, struct evsel *evsel,
+ 
+       ret = annotate_browser__run(&browser, evsel, hbt);
+ 
+-      annotated_source__purge(notes->src);
++      if(not_annotated)
++              annotated_source__purge(notes->src);
+ 
+ out_free_offsets:
+-      zfree(&notes->offsets);
++      if(not_annotated)
++              zfree(&notes->offsets);
+       return ret;
+ }
