commit:     463e67dc5a2521fdc2e4ceafe2f42ac32f680752
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Wed Feb 19 23:49:07 2020 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Wed Feb 19 23:49:07 2020 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=463e67dc

Linux patch 5.5.5

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

 0000_README            |    4 +
 1004_linux-5.5.5.patch | 3180 ++++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 3184 insertions(+)
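
Worth calling out from the diff below: drivers/bus/moxtet.c sized a hex output buffer with sizeof(buf), where buf is a pointer parameter, so the expression yields the pointer width rather than the destination length; 5.5.5 corrects it to sizeof(bin). A minimal stand-alone C sketch of the pitfall (the names and the array size here are illustrative, not the kernel's):

#include <stdio.h>

/* 'buf' decays to a pointer, so sizeof(buf) is the pointer width
 * (8 bytes on LP64), no matter how large the caller's array is. */
static void demo(const unsigned char *buf)
{
	unsigned char bin[16];              /* stand-in for the kernel's fixed-size bin[] */
	char hex_ok[sizeof(bin) * 2 + 1];   /* 33 bytes: two hex digits per byte plus NUL */
	char hex_bad[sizeof(buf) * 2 + 1];  /* 17 bytes on LP64: too small */

	printf("bin=%zu buf=%zu ok=%zu bad=%zu\n",
	       sizeof(bin), sizeof(buf), sizeof(hex_ok), sizeof(hex_bad));
}

int main(void)
{
	unsigned char data[16] = { 0 };
	demo(data);
	return 0;
}

On an LP64 target the buggy form reserves 17 bytes where the hex encoding of bin needs 33, so the subsequent encoding could overrun the stack buffer.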

diff --git a/0000_README b/0000_README
index 567c784..7eb2076 100644
--- a/0000_README
+++ b/0000_README
@@ -59,6 +59,10 @@ Patch:  1003_linux-5.5.4.patch
 From:   http://www.kernel.org
 Desc:   Linux 5.5.4
 
+Patch:  1004_linux-5.5.5.patch
+From:   http://www.kernel.org
+Desc:   Linux 5.5.5
+
 Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.

diff --git a/1004_linux-5.5.5.patch b/1004_linux-5.5.5.patch
new file mode 100644
index 0000000..1da35c5
--- /dev/null
+++ b/1004_linux-5.5.5.patch
@@ -0,0 +1,3180 @@
+diff --git a/Makefile b/Makefile
+index 62f956e9c81d..1f7dc3a2e1dd 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 5
+ PATCHLEVEL = 5
+-SUBLEVEL = 4
++SUBLEVEL = 5
+ EXTRAVERSION =
+ NAME = Kleptomaniac Octopus
+ 
+diff --git a/arch/arm/mach-npcm/Kconfig b/arch/arm/mach-npcm/Kconfig
+index 880bc2a5cada..7f7002dc2b21 100644
+--- a/arch/arm/mach-npcm/Kconfig
++++ b/arch/arm/mach-npcm/Kconfig
+@@ -11,7 +11,7 @@ config ARCH_NPCM7XX
+       depends on ARCH_MULTI_V7
+       select PINCTRL_NPCM7XX
+       select NPCM7XX_TIMER
+-      select ARCH_REQUIRE_GPIOLIB
++      select GPIOLIB
+       select CACHE_L2X0
+       select ARM_GIC
+       select HAVE_ARM_TWD if SMP
+diff --git a/arch/arm64/boot/dts/arm/fvp-base-revc.dts b/arch/arm64/boot/dts/arm/fvp-base-revc.dts
+index 62ab0d54ff71..335fff762451 100644
+--- a/arch/arm64/boot/dts/arm/fvp-base-revc.dts
++++ b/arch/arm64/boot/dts/arm/fvp-base-revc.dts
+@@ -161,10 +161,10 @@
+               bus-range = <0x0 0x1>;
+               reg = <0x0 0x40000000 0x0 0x10000000>;
+               ranges = <0x2000000 0x0 0x50000000 0x0 0x50000000 0x0 0x10000000>;
+-              interrupt-map = <0 0 0 1 &gic GIC_SPI 168 IRQ_TYPE_LEVEL_HIGH>,
+-                              <0 0 0 2 &gic GIC_SPI 169 IRQ_TYPE_LEVEL_HIGH>,
+-                              <0 0 0 3 &gic GIC_SPI 170 IRQ_TYPE_LEVEL_HIGH>,
+-                              <0 0 0 4 &gic GIC_SPI 171 IRQ_TYPE_LEVEL_HIGH>;
++              interrupt-map = <0 0 0 1 &gic 0 0 GIC_SPI 168 IRQ_TYPE_LEVEL_HIGH>,
++                              <0 0 0 2 &gic 0 0 GIC_SPI 169 IRQ_TYPE_LEVEL_HIGH>,
++                              <0 0 0 3 &gic 0 0 GIC_SPI 170 IRQ_TYPE_LEVEL_HIGH>,
++                              <0 0 0 4 &gic 0 0 GIC_SPI 171 IRQ_TYPE_LEVEL_HIGH>;
+               interrupt-map-mask = <0x0 0x0 0x0 0x7>;
+               msi-map = <0x0 &its 0x0 0x10000>;
+               iommu-map = <0x0 &smmu 0x0 0x10000>;
+diff --git a/arch/arm64/kernel/process.c b/arch/arm64/kernel/process.c
+index d54586d5b031..fab013c5ee8c 100644
+--- a/arch/arm64/kernel/process.c
++++ b/arch/arm64/kernel/process.c
+@@ -466,6 +466,13 @@ static void ssbs_thread_switch(struct task_struct *next)
+       if (unlikely(next->flags & PF_KTHREAD))
+               return;
+ 
++      /*
++       * If all CPUs implement the SSBS extension, then we just need to
++       * context-switch the PSTATE field.
++       */
++      if (cpu_have_feature(cpu_feature(SSBS)))
++              return;
++
+       /* If the mitigation is enabled, then we leave SSBS clear. */
+       if ((arm64_get_ssbd_state() == ARM64_SSBD_FORCE_ENABLE) ||
+           test_tsk_thread_flag(next, TIF_SSBD))
+diff --git a/arch/s390/boot/uv.c b/arch/s390/boot/uv.c
+index ed007f4a6444..3f501159ee9f 100644
+--- a/arch/s390/boot/uv.c
++++ b/arch/s390/boot/uv.c
+@@ -15,7 +15,8 @@ void uv_query_info(void)
+       if (!test_facility(158))
+               return;
+ 
+-      if (uv_call(0, (uint64_t)&uvcb))
++      /* rc==0x100 means that there is additional data we do not process */
++      if (uv_call(0, (uint64_t)&uvcb) && uvcb.header.rc != 0x100)
+               return;
+ 
+       if (test_bit_inv(BIT_UVC_CMD_SET_SHARED_ACCESS, (unsigned long *)uvcb.inst_calls_list) &&
+diff --git a/arch/s390/include/asm/timex.h b/arch/s390/include/asm/timex.h
+index 670f14a228e5..6bf3a45ccfec 100644
+--- a/arch/s390/include/asm/timex.h
++++ b/arch/s390/include/asm/timex.h
+@@ -155,7 +155,7 @@ static inline void get_tod_clock_ext(char *clk)
+ 
+ static inline unsigned long long get_tod_clock(void)
+ {
+-      unsigned char clk[STORE_CLOCK_EXT_SIZE];
++      char clk[STORE_CLOCK_EXT_SIZE];
+ 
+       get_tod_clock_ext(clk);
+       return *((unsigned long long *)&clk[1]);
+diff --git a/arch/x86/events/amd/core.c b/arch/x86/events/amd/core.c
+index a7752cd78b89..dede714b46e8 100644
+--- a/arch/x86/events/amd/core.c
++++ b/arch/x86/events/amd/core.c
+@@ -246,6 +246,7 @@ static const u64 amd_f17h_perfmon_event_map[PERF_COUNT_HW_MAX] =
+       [PERF_COUNT_HW_CPU_CYCLES]              = 0x0076,
+       [PERF_COUNT_HW_INSTRUCTIONS]            = 0x00c0,
+       [PERF_COUNT_HW_CACHE_REFERENCES]        = 0xff60,
++      [PERF_COUNT_HW_CACHE_MISSES]            = 0x0964,
+       [PERF_COUNT_HW_BRANCH_INSTRUCTIONS]     = 0x00c2,
+       [PERF_COUNT_HW_BRANCH_MISSES]           = 0x00c3,
+       [PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = 0x0287,
+diff --git a/arch/x86/events/intel/ds.c b/arch/x86/events/intel/ds.c
+index ce83950036c5..e5ad97a82342 100644
+--- a/arch/x86/events/intel/ds.c
++++ b/arch/x86/events/intel/ds.c
+@@ -1713,6 +1713,8 @@ intel_pmu_save_and_restart_reload(struct perf_event *event, int count)
+       old = ((s64)(prev_raw_count << shift) >> shift);
+       local64_add(new - old + count * period, &event->count);
+ 
++      local64_set(&hwc->period_left, -new);
++
+       perf_event_update_userpage(event);
+ 
+       return 0;
+diff --git a/arch/x86/kvm/mmu/paging_tmpl.h b/arch/x86/kvm/mmu/paging_tmpl.h
+index c1d7b866a03f..4e3f137ffa8c 100644
+--- a/arch/x86/kvm/mmu/paging_tmpl.h
++++ b/arch/x86/kvm/mmu/paging_tmpl.h
+@@ -33,7 +33,7 @@
+       #define PT_GUEST_ACCESSED_SHIFT PT_ACCESSED_SHIFT
+       #define PT_HAVE_ACCESSED_DIRTY(mmu) true
+       #ifdef CONFIG_X86_64
+-      #define PT_MAX_FULL_LEVELS 4
++      #define PT_MAX_FULL_LEVELS PT64_ROOT_MAX_LEVEL
+       #define CMPXCHG cmpxchg
+       #else
+       #define CMPXCHG cmpxchg64
+diff --git a/arch/x86/kvm/vmx/nested.c b/arch/x86/kvm/vmx/nested.c
+index 5bfa8228f0c7..3babe5e29429 100644
+--- a/arch/x86/kvm/vmx/nested.c
++++ b/arch/x86/kvm/vmx/nested.c
+@@ -3583,6 +3583,33 @@ static void nested_vmx_inject_exception_vmexit(struct kvm_vcpu *vcpu,
+       nested_vmx_vmexit(vcpu, EXIT_REASON_EXCEPTION_NMI, intr_info, exit_qual);
+ }
+ 
++/*
++ * Returns true if a debug trap is pending delivery.
++ *
++ * In KVM, debug traps bear an exception payload. As such, the class of a #DB
++ * exception may be inferred from the presence of an exception payload.
++ */
++static inline bool vmx_pending_dbg_trap(struct kvm_vcpu *vcpu)
++{
++      return vcpu->arch.exception.pending &&
++                      vcpu->arch.exception.nr == DB_VECTOR &&
++                      vcpu->arch.exception.payload;
++}
++
++/*
++ * Certain VM-exits set the 'pending debug exceptions' field to indicate a
++ * recognized #DB (data or single-step) that has yet to be delivered. Since KVM
++ * represents these debug traps with a payload that is said to be compatible
++ * with the 'pending debug exceptions' field, write the payload to the VMCS
++ * field if a VM-exit is delivered before the debug trap.
++ */
++static void nested_vmx_update_pending_dbg(struct kvm_vcpu *vcpu)
++{
++      if (vmx_pending_dbg_trap(vcpu))
++              vmcs_writel(GUEST_PENDING_DBG_EXCEPTIONS,
++                          vcpu->arch.exception.payload);
++}
++
+ static int vmx_check_nested_events(struct kvm_vcpu *vcpu, bool external_intr)
+ {
+       struct vcpu_vmx *vmx = to_vmx(vcpu);
+@@ -3595,6 +3622,7 @@ static int vmx_check_nested_events(struct kvm_vcpu *vcpu, bool external_intr)
+               test_bit(KVM_APIC_INIT, &apic->pending_events)) {
+               if (block_nested_events)
+                       return -EBUSY;
++              nested_vmx_update_pending_dbg(vcpu);
+               clear_bit(KVM_APIC_INIT, &apic->pending_events);
+               nested_vmx_vmexit(vcpu, EXIT_REASON_INIT_SIGNAL, 0, 0);
+               return 0;
+diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
+index 78e01e2524bc..c0d837c37f34 100644
+--- a/arch/x86/kvm/vmx/vmx.c
++++ b/arch/x86/kvm/vmx/vmx.c
+@@ -2968,6 +2968,9 @@ void vmx_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
+ 
+ static int get_ept_level(struct kvm_vcpu *vcpu)
+ {
++      /* Nested EPT currently only supports 4-level walks. */
++      if (is_guest_mode(vcpu) && nested_cpu_has_ept(get_vmcs12(vcpu)))
++              return 4;
+       if (cpu_has_vmx_ept_5levels() && (cpuid_maxphyaddr(vcpu) > 48))
+               return 5;
+       return 4;
+diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
+index d744c1bf4dc8..e594fd2719dd 100644
+--- a/arch/x86/kvm/x86.c
++++ b/arch/x86/kvm/x86.c
+@@ -437,6 +437,14 @@ void kvm_deliver_exception_payload(struct kvm_vcpu *vcpu)
+                * for #DB exceptions under VMX.
+                */
+               vcpu->arch.dr6 ^= payload & DR6_RTM;
++
++              /*
++               * The #DB payload is defined as compatible with the 'pending
++               * debug exceptions' field under VMX, not DR6. While bit 12 is
++               * defined in the 'pending debug exceptions' field (enabled
++               * breakpoint), it is reserved and must be zero in DR6.
++               */
++              vcpu->arch.dr6 &= ~BIT(12);
+               break;
+       case PF_VECTOR:
+               vcpu->arch.cr2 = payload;
+diff --git a/drivers/acpi/acpica/achware.h b/drivers/acpi/acpica/achware.h
+index bcf8f7501db7..a74c1a0e892d 100644
+--- a/drivers/acpi/acpica/achware.h
++++ b/drivers/acpi/acpica/achware.h
+@@ -101,6 +101,8 @@ acpi_status acpi_hw_enable_all_runtime_gpes(void);
+ 
+ acpi_status acpi_hw_enable_all_wakeup_gpes(void);
+ 
++u8 acpi_hw_check_all_gpes(void);
++
+ acpi_status
+ acpi_hw_enable_runtime_gpe_block(struct acpi_gpe_xrupt_info *gpe_xrupt_info,
+                                struct acpi_gpe_block_info *gpe_block,
+diff --git a/drivers/acpi/acpica/evxfgpe.c b/drivers/acpi/acpica/evxfgpe.c
+index 04a40d563dd6..84b0b410310e 100644
+--- a/drivers/acpi/acpica/evxfgpe.c
++++ b/drivers/acpi/acpica/evxfgpe.c
+@@ -795,6 +795,38 @@ acpi_status acpi_enable_all_wakeup_gpes(void)
+ 
+ ACPI_EXPORT_SYMBOL(acpi_enable_all_wakeup_gpes)
+ 
++/******************************************************************************
++ *
++ * FUNCTION:    acpi_any_gpe_status_set
++ *
++ * PARAMETERS:  None
++ *
++ * RETURN:      Whether or not the status bit is set for any GPE
++ *
++ * DESCRIPTION: Check the status bits of all enabled GPEs and return TRUE if any
++ *              of them is set or FALSE otherwise.
++ *
++ ******************************************************************************/
++u32 acpi_any_gpe_status_set(void)
++{
++      acpi_status status;
++      u8 ret;
++
++      ACPI_FUNCTION_TRACE(acpi_any_gpe_status_set);
++
++      status = acpi_ut_acquire_mutex(ACPI_MTX_EVENTS);
++      if (ACPI_FAILURE(status)) {
++              return (FALSE);
++      }
++
++      ret = acpi_hw_check_all_gpes();
++      (void)acpi_ut_release_mutex(ACPI_MTX_EVENTS);
++
++      return (ret);
++}
++
++ACPI_EXPORT_SYMBOL(acpi_any_gpe_status_set)
++
+ /*******************************************************************************
+  *
+  * FUNCTION:    acpi_install_gpe_block
+diff --git a/drivers/acpi/acpica/hwgpe.c b/drivers/acpi/acpica/hwgpe.c
+index 565bd3f29f31..b1d7d5f92495 100644
+--- a/drivers/acpi/acpica/hwgpe.c
++++ b/drivers/acpi/acpica/hwgpe.c
+@@ -444,6 +444,53 @@ acpi_hw_enable_wakeup_gpe_block(struct acpi_gpe_xrupt_info *gpe_xrupt_info,
+       return (AE_OK);
+ }
+ 
++/******************************************************************************
++ *
++ * FUNCTION:    acpi_hw_get_gpe_block_status
++ *
++ * PARAMETERS:  gpe_xrupt_info      - GPE Interrupt info
++ *              gpe_block           - Gpe Block info
++ *
++ * RETURN:      Success
++ *
++ * DESCRIPTION: Produce a combined GPE status bits mask for the given block.
++ *
++ ******************************************************************************/
++
++static acpi_status
++acpi_hw_get_gpe_block_status(struct acpi_gpe_xrupt_info *gpe_xrupt_info,
++                           struct acpi_gpe_block_info *gpe_block,
++                           void *ret_ptr)
++{
++      struct acpi_gpe_register_info *gpe_register_info;
++      u64 in_enable, in_status;
++      acpi_status status;
++      u8 *ret = ret_ptr;
++      u32 i;
++
++      /* Examine each GPE Register within the block */
++
++      for (i = 0; i < gpe_block->register_count; i++) {
++              gpe_register_info = &gpe_block->register_info[i];
++
++              status = acpi_hw_read(&in_enable,
++                                    &gpe_register_info->enable_address);
++              if (ACPI_FAILURE(status)) {
++                      continue;
++              }
++
++              status = acpi_hw_read(&in_status,
++                                    &gpe_register_info->status_address);
++              if (ACPI_FAILURE(status)) {
++                      continue;
++              }
++
++              *ret |= in_enable & in_status;
++      }
++
++      return (AE_OK);
++}
++
+ /******************************************************************************
+  *
+  * FUNCTION:    acpi_hw_disable_all_gpes
+@@ -510,4 +557,28 @@ acpi_status acpi_hw_enable_all_wakeup_gpes(void)
+       return_ACPI_STATUS(status);
+ }
+ 
++/******************************************************************************
++ *
++ * FUNCTION:    acpi_hw_check_all_gpes
++ *
++ * PARAMETERS:  None
++ *
++ * RETURN:      Combined status of all GPEs
++ *
++ * DESCRIPTION: Check all enabled GPEs in all GPE blocks and return TRUE if the
++ *              status bit is set for at least one of them of FALSE otherwise.
++ *
++ ******************************************************************************/
++
++u8 acpi_hw_check_all_gpes(void)
++{
++      u8 ret = 0;
++
++      ACPI_FUNCTION_TRACE(acpi_hw_check_all_gpes);
++
++      (void)acpi_ev_walk_gpe_list(acpi_hw_get_gpe_block_status, &ret);
++
++      return (ret != 0);
++}
++
+ #endif                                /* !ACPI_REDUCED_HARDWARE */
+diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c
+index d05be13c1022..bd74c7836675 100644
+--- a/drivers/acpi/ec.c
++++ b/drivers/acpi/ec.c
+@@ -179,6 +179,7 @@ EXPORT_SYMBOL(first_ec);
+ 
+ static struct acpi_ec *boot_ec;
+ static bool boot_ec_is_ecdt = false;
++static struct workqueue_struct *ec_wq;
+ static struct workqueue_struct *ec_query_wq;
+ 
+ static int EC_FLAGS_QUERY_HANDSHAKE; /* Needs QR_EC issued when SCI_EVT set */
+@@ -469,7 +470,7 @@ static void acpi_ec_submit_query(struct acpi_ec *ec)
+               ec_dbg_evt("Command(%s) submitted/blocked",
+                          acpi_ec_cmd_string(ACPI_EC_COMMAND_QUERY));
+               ec->nr_pending_queries++;
+-              schedule_work(&ec->work);
++              queue_work(ec_wq, &ec->work);
+       }
+ }
+ 
+@@ -535,7 +536,7 @@ static void acpi_ec_enable_event(struct acpi_ec *ec)
+ #ifdef CONFIG_PM_SLEEP
+ static void __acpi_ec_flush_work(void)
+ {
+-      flush_scheduled_work(); /* flush ec->work */
++      drain_workqueue(ec_wq); /* flush ec->work */
+       flush_workqueue(ec_query_wq); /* flush queries */
+ }
+ 
+@@ -556,8 +557,8 @@ static void acpi_ec_disable_event(struct acpi_ec *ec)
+ 
+ void acpi_ec_flush_work(void)
+ {
+-      /* Without ec_query_wq there is nothing to flush. */
+-      if (!ec_query_wq)
++      /* Without ec_wq there is nothing to flush. */
++      if (!ec_wq)
+               return;
+ 
+       __acpi_ec_flush_work();
+@@ -2115,25 +2116,33 @@ static struct acpi_driver acpi_ec_driver = {
+       .drv.pm = &acpi_ec_pm,
+ };
+ 
+-static inline int acpi_ec_query_init(void)
++static void acpi_ec_destroy_workqueues(void)
+ {
+-      if (!ec_query_wq) {
+-              ec_query_wq = alloc_workqueue("kec_query", 0,
+-                                            ec_max_queries);
+-              if (!ec_query_wq)
+-                      return -ENODEV;
++      if (ec_wq) {
++              destroy_workqueue(ec_wq);
++              ec_wq = NULL;
+       }
+-      return 0;
+-}
+-
+-static inline void acpi_ec_query_exit(void)
+-{
+       if (ec_query_wq) {
+               destroy_workqueue(ec_query_wq);
+               ec_query_wq = NULL;
+       }
+ }
+ 
++static int acpi_ec_init_workqueues(void)
++{
++      if (!ec_wq)
++              ec_wq = alloc_ordered_workqueue("kec", 0);
++
++      if (!ec_query_wq)
++              ec_query_wq = alloc_workqueue("kec_query", 0, ec_max_queries);
++
++      if (!ec_wq || !ec_query_wq) {
++              acpi_ec_destroy_workqueues();
++              return -ENODEV;
++      }
++      return 0;
++}
++
+ static const struct dmi_system_id acpi_ec_no_wakeup[] = {
+       {
+               .ident = "Thinkpad X1 Carbon 6th",
+@@ -2164,8 +2173,7 @@ int __init acpi_ec_init(void)
+       int result;
+       int ecdt_fail, dsdt_fail;
+ 
+-      /* register workqueue for _Qxx evaluations */
+-      result = acpi_ec_query_init();
++      result = acpi_ec_init_workqueues();
+       if (result)
+               return result;
+ 
+@@ -2196,6 +2204,6 @@ static void __exit acpi_ec_exit(void)
+ {
+ 
+       acpi_bus_unregister_driver(&acpi_ec_driver);
+-      acpi_ec_query_exit();
++      acpi_ec_destroy_workqueues();
+ }
+ #endif        /* 0 */
+diff --git a/drivers/acpi/sleep.c b/drivers/acpi/sleep.c
+index 6747a279621b..5672fa8cb300 100644
+--- a/drivers/acpi/sleep.c
++++ b/drivers/acpi/sleep.c
+@@ -987,21 +987,34 @@ static void acpi_s2idle_sync(void)
+       acpi_os_wait_events_complete(); /* synchronize Notify handling */
+ }
+ 
+-static void acpi_s2idle_wake(void)
++static bool acpi_s2idle_wake(void)
+ {
+-      /*
+-       * If IRQD_WAKEUP_ARMED is set for the SCI at this point, the SCI has
+-       * not triggered while suspended, so bail out.
+-       */
+-      if (!acpi_sci_irq_valid() ||
+-          irqd_is_wakeup_armed(irq_get_irq_data(acpi_sci_irq)))
+-              return;
++      if (!acpi_sci_irq_valid())
++              return pm_wakeup_pending();
++
++      while (pm_wakeup_pending()) {
++              /*
++               * If IRQD_WAKEUP_ARMED is set for the SCI at this point, the
++               * SCI has not triggered while suspended, so bail out (the
++               * wakeup is pending anyway and the SCI is not the source of
++               * it).
++               */
++              if (irqd_is_wakeup_armed(irq_get_irq_data(acpi_sci_irq)))
++                      return true;
++
++              /*
++               * If there are no EC events to process and at least one of the
++               * other enabled GPEs is active, the wakeup is regarded as a
++               * genuine one.
++               *
++               * Note that the checks below must be carried out in this order
++               * to avoid returning prematurely due to a change of the EC GPE
++               * status bit from unset to set between the checks with the
++               * status bits of all the other GPEs unset.
++               */
++              if (acpi_any_gpe_status_set() && !acpi_ec_dispatch_gpe())
++                      return true;
+ 
+-      /*
+-       * If there are EC events to process, the wakeup may be a spurious one
+-       * coming from the EC.
+-       */
+-      if (acpi_ec_dispatch_gpe()) {
+               /*
+                * Cancel the wakeup and process all pending events in case
+                * there are any wakeup ones in there.
+@@ -1014,8 +1027,19 @@ static void acpi_s2idle_wake(void)
+ 
+               acpi_s2idle_sync();
+ 
++              /*
++               * The SCI is in the "suspended" state now and it cannot produce
++               * new wakeup events till the rearming below, so if any of them
++               * are pending here, they must be resulting from the processing
++               * of EC events above or coming from somewhere else.
++               */
++              if (pm_wakeup_pending())
++                      return true;
++
+               rearm_wake_irq(acpi_sci_irq);
+       }
++
++      return false;
+ }
+ 
+ static void acpi_s2idle_restore_early(void)
+diff --git a/drivers/bus/moxtet.c b/drivers/bus/moxtet.c
+index 36cf13eee6b8..68413bf9cf87 100644
+--- a/drivers/bus/moxtet.c
++++ b/drivers/bus/moxtet.c
+@@ -466,7 +466,7 @@ static ssize_t input_read(struct file *file, char __user *buf, size_t len,
+ {
+       struct moxtet *moxtet = file->private_data;
+       u8 bin[TURRIS_MOX_MAX_MODULES];
+-      u8 hex[sizeof(buf) * 2 + 1];
++      u8 hex[sizeof(bin) * 2 + 1];
+       int ret, n;
+ 
+       ret = moxtet_spi_read(moxtet, bin);
+diff --git a/drivers/char/ipmi/ipmb_dev_int.c b/drivers/char/ipmi/ipmb_dev_int.c
+index 1ff4fb1def7c..800532595ea7 100644
+--- a/drivers/char/ipmi/ipmb_dev_int.c
++++ b/drivers/char/ipmi/ipmb_dev_int.c
+@@ -253,7 +253,7 @@ static int ipmb_slave_cb(struct i2c_client *client,
+               break;
+ 
+       case I2C_SLAVE_WRITE_RECEIVED:
+-              if (ipmb_dev->msg_idx >= sizeof(struct ipmb_msg))
++              if (ipmb_dev->msg_idx >= sizeof(struct ipmb_msg) - 1)
+                       break;
+ 
+               buf[++ipmb_dev->msg_idx] = *val;
+diff --git a/drivers/edac/edac_mc.c b/drivers/edac/edac_mc.c
+index 7243b88f81d8..69e0d90460e6 100644
+--- a/drivers/edac/edac_mc.c
++++ b/drivers/edac/edac_mc.c
+@@ -505,16 +505,10 @@ void edac_mc_free(struct mem_ctl_info *mci)
+ {
+       edac_dbg(1, "\n");
+ 
+-      /* If we're not yet registered with sysfs free only what was allocated
+-       * in edac_mc_alloc().
+-       */
+-      if (!device_is_registered(&mci->dev)) {
+-              _edac_mc_free(mci);
+-              return;
+-      }
++      if (device_is_registered(&mci->dev))
++              edac_unregister_sysfs(mci);
+ 
+-      /* the mci instance is freed here, when the sysfs object is dropped */
+-      edac_unregister_sysfs(mci);
++      _edac_mc_free(mci);
+ }
+ EXPORT_SYMBOL_GPL(edac_mc_free);
+ 
+diff --git a/drivers/edac/edac_mc_sysfs.c b/drivers/edac/edac_mc_sysfs.c
+index 0367554e7437..c70ec0a306d8 100644
+--- a/drivers/edac/edac_mc_sysfs.c
++++ b/drivers/edac/edac_mc_sysfs.c
+@@ -276,10 +276,7 @@ static const struct attribute_group *csrow_attr_groups[] = {
+ 
+ static void csrow_attr_release(struct device *dev)
+ {
+-      struct csrow_info *csrow = container_of(dev, struct csrow_info, dev);
+-
+-      edac_dbg(1, "device %s released\n", dev_name(dev));
+-      kfree(csrow);
++      /* release device with _edac_mc_free() */
+ }
+ 
+ static const struct device_type csrow_attr_type = {
+@@ -447,8 +444,7 @@ error:
+               csrow = mci->csrows[i];
+               if (!nr_pages_per_csrow(csrow))
+                       continue;
+-
+-              device_del(&mci->csrows[i]->dev);
++              device_unregister(&mci->csrows[i]->dev);
+       }
+ 
+       return err;
+@@ -608,10 +604,7 @@ static const struct attribute_group *dimm_attr_groups[] = {
+ 
+ static void dimm_attr_release(struct device *dev)
+ {
+-      struct dimm_info *dimm = container_of(dev, struct dimm_info, dev);
+-
+-      edac_dbg(1, "device %s released\n", dev_name(dev));
+-      kfree(dimm);
++      /* release device with _edac_mc_free() */
+ }
+ 
+ static const struct device_type dimm_attr_type = {
+@@ -893,10 +886,7 @@ static const struct attribute_group *mci_attr_groups[] = {
+ 
+ static void mci_attr_release(struct device *dev)
+ {
+-      struct mem_ctl_info *mci = container_of(dev, struct mem_ctl_info, dev);
+-
+-      edac_dbg(1, "device %s released\n", dev_name(dev));
+-      kfree(mci);
++      /* release device with _edac_mc_free() */
+ }
+ 
+ static const struct device_type mci_attr_type = {
+diff --git a/drivers/gpio/gpio-xilinx.c b/drivers/gpio/gpio-xilinx.c
+index a9748b5198e6..67f9f82e0db0 100644
+--- a/drivers/gpio/gpio-xilinx.c
++++ b/drivers/gpio/gpio-xilinx.c
+@@ -147,9 +147,10 @@ static void xgpio_set_multiple(struct gpio_chip *gc, unsigned long *mask,
+       for (i = 0; i < gc->ngpio; i++) {
+               if (*mask == 0)
+                       break;
++              /* Once finished with an index write it out to the register */
+               if (index !=  xgpio_index(chip, i)) {
+                       xgpio_writereg(chip->regs + XGPIO_DATA_OFFSET +
+-                                     xgpio_regoffset(chip, i),
++                                     index * XGPIO_CHANNEL_OFFSET,
+                                      chip->gpio_state[index]);
+                       spin_unlock_irqrestore(&chip->gpio_lock[index], flags);
+                       index =  xgpio_index(chip, i);
+@@ -165,7 +166,7 @@ static void xgpio_set_multiple(struct gpio_chip *gc, unsigned long *mask,
+       }
+ 
+       xgpio_writereg(chip->regs + XGPIO_DATA_OFFSET +
+-                     xgpio_regoffset(chip, i), chip->gpio_state[index]);
++                     index * XGPIO_CHANNEL_OFFSET, chip->gpio_state[index]);
+ 
+       spin_unlock_irqrestore(&chip->gpio_lock[index], flags);
+ }
+diff --git a/drivers/gpio/gpiolib-of.c b/drivers/gpio/gpiolib-of.c
+index b696e4598a24..b0e79bed5952 100644
+--- a/drivers/gpio/gpiolib-of.c
++++ b/drivers/gpio/gpiolib-of.c
+@@ -147,10 +147,6 @@ static void of_gpio_flags_quirks(struct device_node *np,
+                       if (of_property_read_bool(np, "cd-inverted"))
+                               *flags ^= OF_GPIO_ACTIVE_LOW;
+               }
+-              if (!strcmp(propname, "wp-gpios")) {
+-                      if (of_property_read_bool(np, "wp-inverted"))
+-                              *flags ^= OF_GPIO_ACTIVE_LOW;
+-              }
+       }
+       /*
+        * Some GPIO fixed regulator quirks.
+diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c
+index 78a16e42f222..bcfbfded9ba3 100644
+--- a/drivers/gpio/gpiolib.c
++++ b/drivers/gpio/gpiolib.c
+@@ -3371,6 +3371,17 @@ int gpiod_is_active_low(const struct gpio_desc *desc)
+ }
+ EXPORT_SYMBOL_GPL(gpiod_is_active_low);
+ 
++/**
++ * gpiod_toggle_active_low - toggle whether a GPIO is active-low or not
++ * @desc: the gpio descriptor to change
++ */
++void gpiod_toggle_active_low(struct gpio_desc *desc)
++{
++      VALIDATE_DESC_VOID(desc);
++      change_bit(FLAG_ACTIVE_LOW, &desc->flags);
++}
++EXPORT_SYMBOL_GPL(gpiod_toggle_active_low);
++
+ /* I/O calls are only valid after configuration completed; the relevant
+  * "is this a valid GPIO" error checks should already have been done.
+  *
+diff --git a/drivers/gpu/drm/amd/powerplay/inc/smu_v11_0_pptable.h b/drivers/gpu/drm/amd/powerplay/inc/smu_v11_0_pptable.h
+index b2f96a101124..7a63cf8e85ed 100644
+--- a/drivers/gpu/drm/amd/powerplay/inc/smu_v11_0_pptable.h
++++ b/drivers/gpu/drm/amd/powerplay/inc/smu_v11_0_pptable.h
+@@ -39,21 +39,39 @@
+ #define SMU_11_0_PP_OVERDRIVE_VERSION                   0x0800
+ #define SMU_11_0_PP_POWERSAVINGCLOCK_VERSION            0x0100
+ 
++enum SMU_11_0_ODFEATURE_CAP {
++    SMU_11_0_ODCAP_GFXCLK_LIMITS = 0,
++    SMU_11_0_ODCAP_GFXCLK_CURVE,
++    SMU_11_0_ODCAP_UCLK_MAX,
++    SMU_11_0_ODCAP_POWER_LIMIT,
++    SMU_11_0_ODCAP_FAN_ACOUSTIC_LIMIT,
++    SMU_11_0_ODCAP_FAN_SPEED_MIN,
++    SMU_11_0_ODCAP_TEMPERATURE_FAN,
++    SMU_11_0_ODCAP_TEMPERATURE_SYSTEM,
++    SMU_11_0_ODCAP_MEMORY_TIMING_TUNE,
++    SMU_11_0_ODCAP_FAN_ZERO_RPM_CONTROL,
++    SMU_11_0_ODCAP_AUTO_UV_ENGINE,
++    SMU_11_0_ODCAP_AUTO_OC_ENGINE,
++    SMU_11_0_ODCAP_AUTO_OC_MEMORY,
++    SMU_11_0_ODCAP_FAN_CURVE,
++    SMU_11_0_ODCAP_COUNT,
++};
++
+ enum SMU_11_0_ODFEATURE_ID {
+-    SMU_11_0_ODFEATURE_GFXCLK_LIMITS        = 1 << 0,         //GFXCLK Limit feature
+-    SMU_11_0_ODFEATURE_GFXCLK_CURVE         = 1 << 1,         //GFXCLK Curve feature
+-    SMU_11_0_ODFEATURE_UCLK_MAX             = 1 << 2,         //UCLK Limit feature
+-    SMU_11_0_ODFEATURE_POWER_LIMIT          = 1 << 3,         //Power Limit feature
+-    SMU_11_0_ODFEATURE_FAN_ACOUSTIC_LIMIT   = 1 << 4,         //Fan Acoustic RPM feature
+-    SMU_11_0_ODFEATURE_FAN_SPEED_MIN        = 1 << 5,         //Minimum Fan Speed feature
+-    SMU_11_0_ODFEATURE_TEMPERATURE_FAN      = 1 << 6,         //Fan Target Temperature Limit feature
+-    SMU_11_0_ODFEATURE_TEMPERATURE_SYSTEM   = 1 << 7,         //Operating Temperature Limit feature
+-    SMU_11_0_ODFEATURE_MEMORY_TIMING_TUNE   = 1 << 8,         //AC Timing Tuning feature
+-    SMU_11_0_ODFEATURE_FAN_ZERO_RPM_CONTROL = 1 << 9,         //Zero RPM feature
+-    SMU_11_0_ODFEATURE_AUTO_UV_ENGINE       = 1 << 10,        //Auto Under Volt GFXCLK feature
+-    SMU_11_0_ODFEATURE_AUTO_OC_ENGINE       = 1 << 11,        //Auto Over Clock GFXCLK feature
+-    SMU_11_0_ODFEATURE_AUTO_OC_MEMORY       = 1 << 12,        //Auto Over Clock MCLK feature
+-    SMU_11_0_ODFEATURE_FAN_CURVE            = 1 << 13,        //VICTOR TODO
++    SMU_11_0_ODFEATURE_GFXCLK_LIMITS        = 1 << SMU_11_0_ODCAP_GFXCLK_LIMITS,            //GFXCLK Limit feature
++    SMU_11_0_ODFEATURE_GFXCLK_CURVE         = 1 << SMU_11_0_ODCAP_GFXCLK_CURVE,             //GFXCLK Curve feature
++    SMU_11_0_ODFEATURE_UCLK_MAX             = 1 << SMU_11_0_ODCAP_UCLK_MAX,                 //UCLK Limit feature
++    SMU_11_0_ODFEATURE_POWER_LIMIT          = 1 << SMU_11_0_ODCAP_POWER_LIMIT,              //Power Limit feature
++    SMU_11_0_ODFEATURE_FAN_ACOUSTIC_LIMIT   = 1 << SMU_11_0_ODCAP_FAN_ACOUSTIC_LIMIT,       //Fan Acoustic RPM feature
++    SMU_11_0_ODFEATURE_FAN_SPEED_MIN        = 1 << SMU_11_0_ODCAP_FAN_SPEED_MIN,            //Minimum Fan Speed feature
++    SMU_11_0_ODFEATURE_TEMPERATURE_FAN      = 1 << SMU_11_0_ODCAP_TEMPERATURE_FAN,          //Fan Target Temperature Limit feature
++    SMU_11_0_ODFEATURE_TEMPERATURE_SYSTEM   = 1 << SMU_11_0_ODCAP_TEMPERATURE_SYSTEM,       //Operating Temperature Limit feature
++    SMU_11_0_ODFEATURE_MEMORY_TIMING_TUNE   = 1 << SMU_11_0_ODCAP_MEMORY_TIMING_TUNE,       //AC Timing Tuning feature
++    SMU_11_0_ODFEATURE_FAN_ZERO_RPM_CONTROL = 1 << SMU_11_0_ODCAP_FAN_ZERO_RPM_CONTROL,     //Zero RPM feature
++    SMU_11_0_ODFEATURE_AUTO_UV_ENGINE       = 1 << SMU_11_0_ODCAP_AUTO_UV_ENGINE,           //Auto Under Volt GFXCLK feature
++    SMU_11_0_ODFEATURE_AUTO_OC_ENGINE       = 1 << SMU_11_0_ODCAP_AUTO_OC_ENGINE,           //Auto Over Clock GFXCLK feature
++    SMU_11_0_ODFEATURE_AUTO_OC_MEMORY       = 1 << SMU_11_0_ODCAP_AUTO_OC_MEMORY,           //Auto Over Clock MCLK feature
++    SMU_11_0_ODFEATURE_FAN_CURVE            = 1 << SMU_11_0_ODCAP_FAN_CURVE,                //Fan Curve feature
+     SMU_11_0_ODFEATURE_COUNT                = 14,
+ };
+ #define SMU_11_0_MAX_ODFEATURE    32          //Maximum Number of OD Features
+diff --git a/drivers/gpu/drm/amd/powerplay/navi10_ppt.c b/drivers/gpu/drm/amd/powerplay/navi10_ppt.c
+index e3f8c45e7467..2cf81cafc669 100644
+--- a/drivers/gpu/drm/amd/powerplay/navi10_ppt.c
++++ b/drivers/gpu/drm/amd/powerplay/navi10_ppt.c
+@@ -705,9 +705,9 @@ static bool navi10_is_support_fine_grained_dpm(struct smu_context *smu, enum smu
+       return dpm_desc->SnapToDiscrete == 0 ? true : false;
+ }
+ 
+-static inline bool navi10_od_feature_is_supported(struct smu_11_0_overdrive_table *od_table, enum SMU_11_0_ODFEATURE_ID feature)
++static inline bool navi10_od_feature_is_supported(struct smu_11_0_overdrive_table *od_table, enum SMU_11_0_ODFEATURE_CAP cap)
+ {
+-      return od_table->cap[feature];
++      return od_table->cap[cap];
+ }
+ 
+ static void navi10_od_setting_get_range(struct smu_11_0_overdrive_table *od_table,
+@@ -815,7 +815,7 @@ static int navi10_print_clk_levels(struct smu_context *smu,
+       case SMU_OD_SCLK:
+               if (!smu->od_enabled || !od_table || !od_settings)
+                       break;
+-              if (!navi10_od_feature_is_supported(od_settings, SMU_11_0_ODFEATURE_GFXCLK_LIMITS))
++              if (!navi10_od_feature_is_supported(od_settings, SMU_11_0_ODCAP_GFXCLK_LIMITS))
+                       break;
+               size += sprintf(buf + size, "OD_SCLK:\n");
+               size += sprintf(buf + size, "0: %uMhz\n1: %uMhz\n", od_table->GfxclkFmin, od_table->GfxclkFmax);
+@@ -823,7 +823,7 @@ static int navi10_print_clk_levels(struct smu_context *smu,
+       case SMU_OD_MCLK:
+               if (!smu->od_enabled || !od_table || !od_settings)
+                       break;
+-              if (!navi10_od_feature_is_supported(od_settings, SMU_11_0_ODFEATURE_UCLK_MAX))
++              if (!navi10_od_feature_is_supported(od_settings, SMU_11_0_ODCAP_UCLK_MAX))
+                       break;
+               size += sprintf(buf + size, "OD_MCLK:\n");
+               size += sprintf(buf + size, "1: %uMHz\n", od_table->UclkFmax);
+@@ -831,7 +831,7 @@ static int navi10_print_clk_levels(struct smu_context *smu,
+       case SMU_OD_VDDC_CURVE:
+               if (!smu->od_enabled || !od_table || !od_settings)
+                       break;
+-              if (!navi10_od_feature_is_supported(od_settings, SMU_11_0_ODFEATURE_GFXCLK_CURVE))
++              if (!navi10_od_feature_is_supported(od_settings, SMU_11_0_ODCAP_GFXCLK_CURVE))
+                       break;
+               size += sprintf(buf + size, "OD_VDDC_CURVE:\n");
+               for (i = 0; i < 3; i++) {
+@@ -856,7 +856,7 @@ static int navi10_print_clk_levels(struct smu_context *smu,
+                       break;
+               size = sprintf(buf, "%s:\n", "OD_RANGE");
+ 
+-              if (navi10_od_feature_is_supported(od_settings, SMU_11_0_ODFEATURE_GFXCLK_LIMITS)) {
++              if (navi10_od_feature_is_supported(od_settings, SMU_11_0_ODCAP_GFXCLK_LIMITS)) {
+                       navi10_od_setting_get_range(od_settings, SMU_11_0_ODSETTING_GFXCLKFMIN,
+                                                   &min_value, NULL);
+                       navi10_od_setting_get_range(od_settings, SMU_11_0_ODSETTING_GFXCLKFMAX,
+@@ -865,14 +865,14 @@ static int navi10_print_clk_levels(struct smu_context *smu,
+                                       min_value, max_value);
+               }
+ 
+-              if (navi10_od_feature_is_supported(od_settings, SMU_11_0_ODFEATURE_UCLK_MAX)) {
++              if (navi10_od_feature_is_supported(od_settings, SMU_11_0_ODCAP_UCLK_MAX)) {
+                       navi10_od_setting_get_range(od_settings, SMU_11_0_ODSETTING_UCLKFMAX,
+                                                   &min_value, &max_value);
+                       size += sprintf(buf + size, "MCLK: %7uMhz %10uMhz\n",
+                                       min_value, max_value);
+               }
+ 
+-              if (navi10_od_feature_is_supported(od_settings, SMU_11_0_ODFEATURE_GFXCLK_CURVE)) {
++              if (navi10_od_feature_is_supported(od_settings, SMU_11_0_ODCAP_GFXCLK_CURVE)) {
+                       navi10_od_setting_get_range(od_settings, SMU_11_0_ODSETTING_VDDGFXCURVEFREQ_P1,
+                                                   &min_value, &max_value);
+                       size += sprintf(buf + size, "VDDC_CURVE_SCLK[0]: %7uMhz %10uMhz\n",
+@@ -1956,7 +1956,7 @@ static int navi10_od_edit_dpm_table(struct smu_context *smu, enum PP_OD_DPM_TABL
+ 
+       switch (type) {
+       case PP_OD_EDIT_SCLK_VDDC_TABLE:
+-              if (!navi10_od_feature_is_supported(od_settings, SMU_11_0_ODFEATURE_GFXCLK_LIMITS)) {
++              if (!navi10_od_feature_is_supported(od_settings, SMU_11_0_ODCAP_GFXCLK_LIMITS)) {
+                       pr_warn("GFXCLK_LIMITS not supported!\n");
+                       return -ENOTSUPP;
+               }
+@@ -2002,7 +2002,7 @@ static int navi10_od_edit_dpm_table(struct smu_context *smu, enum PP_OD_DPM_TABL
+               }
+               break;
+       case PP_OD_EDIT_MCLK_VDDC_TABLE:
+-              if (!navi10_od_feature_is_supported(od_settings, SMU_11_0_ODFEATURE_UCLK_MAX)) {
++              if (!navi10_od_feature_is_supported(od_settings, SMU_11_0_ODCAP_UCLK_MAX)) {
+                       pr_warn("UCLK_MAX not supported!\n");
+                       return -ENOTSUPP;
+               }
+@@ -2043,7 +2043,7 @@ static int navi10_od_edit_dpm_table(struct smu_context *smu, enum PP_OD_DPM_TABL
+               }
+               break;
+       case PP_OD_EDIT_VDDC_CURVE:
+-              if (!navi10_od_feature_is_supported(od_settings, SMU_11_0_ODFEATURE_GFXCLK_CURVE)) {
++              if (!navi10_od_feature_is_supported(od_settings, SMU_11_0_ODCAP_GFXCLK_CURVE)) {
+                       pr_warn("GFXCLK_CURVE not supported!\n");
+                       return -ENOTSUPP;
+               }
+diff --git a/drivers/gpu/drm/drm_dp_mst_topology.c b/drivers/gpu/drm/drm_dp_mst_topology.c
+index 141ba31cf548..6cd90cb4b6b1 100644
+--- a/drivers/gpu/drm/drm_dp_mst_topology.c
++++ b/drivers/gpu/drm/drm_dp_mst_topology.c
+@@ -3772,7 +3772,8 @@ drm_dp_mst_process_up_req(struct drm_dp_mst_topology_mgr *mgr,
+               else if (msg->req_type == DP_RESOURCE_STATUS_NOTIFY)
+                       guid = msg->u.resource_stat.guid;
+ 
+-              mstb = drm_dp_get_mst_branch_device_by_guid(mgr, guid);
++              if (guid)
++                      mstb = drm_dp_get_mst_branch_device_by_guid(mgr, guid);
+       } else {
+               mstb = drm_dp_get_mst_branch_device(mgr, hdr->lct, hdr->rad);
+       }
+diff --git a/drivers/gpu/drm/i915/i915_pmu.c b/drivers/gpu/drm/i915/i915_pmu.c
+index d6d2e6fb8674..40036eff709c 100644
+--- a/drivers/gpu/drm/i915/i915_pmu.c
++++ b/drivers/gpu/drm/i915/i915_pmu.c
+@@ -594,8 +594,10 @@ static void i915_pmu_enable(struct perf_event *event)
+               container_of(event->pmu, typeof(*i915), pmu.base);
+       unsigned int bit = event_enabled_bit(event);
+       struct i915_pmu *pmu = &i915->pmu;
++      intel_wakeref_t wakeref;
+       unsigned long flags;
+ 
++      wakeref = intel_runtime_pm_get(&i915->runtime_pm);
+       spin_lock_irqsave(&pmu->lock, flags);
+ 
+       /*
+@@ -605,6 +607,14 @@ static void i915_pmu_enable(struct perf_event *event)
+       BUILD_BUG_ON(ARRAY_SIZE(pmu->enable_count) != I915_PMU_MASK_BITS);
+       GEM_BUG_ON(bit >= ARRAY_SIZE(pmu->enable_count));
+       GEM_BUG_ON(pmu->enable_count[bit] == ~0);
++
++      if (pmu->enable_count[bit] == 0 &&
++          config_enabled_mask(I915_PMU_RC6_RESIDENCY) & BIT_ULL(bit)) {
++              pmu->sample[__I915_SAMPLE_RC6_LAST_REPORTED].cur = 0;
++              pmu->sample[__I915_SAMPLE_RC6].cur = __get_rc6(&i915->gt);
++              pmu->sleep_last = ktime_get();
++      }
++
+       pmu->enable |= BIT_ULL(bit);
+       pmu->enable_count[bit]++;
+ 
+@@ -645,6 +655,8 @@ static void i915_pmu_enable(struct perf_event *event)
+        * an existing non-zero value.
+        */
+       local64_set(&event->hw.prev_count, __i915_pmu_event_read(event));
++
++      intel_runtime_pm_put(&i915->runtime_pm, wakeref);
+ }
+ 
+ static void i915_pmu_disable(struct perf_event *event)
+diff --git a/drivers/gpu/drm/panfrost/panfrost_drv.c b/drivers/gpu/drm/panfrost/panfrost_drv.c
+index 88b431a267af..273d67e251c2 100644
+--- a/drivers/gpu/drm/panfrost/panfrost_drv.c
++++ b/drivers/gpu/drm/panfrost/panfrost_drv.c
+@@ -166,6 +166,7 @@ panfrost_lookup_bos(struct drm_device *dev,
+                       break;
+               }
+ 
++              atomic_inc(&bo->gpu_usecount);
+               job->mappings[i] = mapping;
+       }
+ 
+diff --git a/drivers/gpu/drm/panfrost/panfrost_gem.h b/drivers/gpu/drm/panfrost/panfrost_gem.h
+index ca1bc9019600..b3517ff9630c 100644
+--- a/drivers/gpu/drm/panfrost/panfrost_gem.h
++++ b/drivers/gpu/drm/panfrost/panfrost_gem.h
+@@ -30,6 +30,12 @@ struct panfrost_gem_object {
+               struct mutex lock;
+       } mappings;
+ 
++      /*
++       * Count the number of jobs referencing this BO so we don't let the
++       * shrinker reclaim this object prematurely.
++       */
++      atomic_t gpu_usecount;
++
+       bool noexec             :1;
+       bool is_heap            :1;
+ };
+diff --git a/drivers/gpu/drm/panfrost/panfrost_gem_shrinker.c b/drivers/gpu/drm/panfrost/panfrost_gem_shrinker.c
+index f5dd7b29bc95..288e46c40673 100644
+--- a/drivers/gpu/drm/panfrost/panfrost_gem_shrinker.c
++++ b/drivers/gpu/drm/panfrost/panfrost_gem_shrinker.c
+@@ -41,6 +41,9 @@ static bool panfrost_gem_purge(struct drm_gem_object *obj)
+       struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);
+       struct panfrost_gem_object *bo = to_panfrost_bo(obj);
+ 
++      if (atomic_read(&bo->gpu_usecount))
++              return false;
++
+       if (!mutex_trylock(&shmem->pages_lock))
+               return false;
+ 
+diff --git a/drivers/gpu/drm/panfrost/panfrost_job.c b/drivers/gpu/drm/panfrost/panfrost_job.c
+index e364ee00f3d0..4d383831c1fc 100644
+--- a/drivers/gpu/drm/panfrost/panfrost_job.c
++++ b/drivers/gpu/drm/panfrost/panfrost_job.c
+@@ -269,8 +269,13 @@ static void panfrost_job_cleanup(struct kref *ref)
+       dma_fence_put(job->render_done_fence);
+ 
+       if (job->mappings) {
+-              for (i = 0; i < job->bo_count; i++)
++              for (i = 0; i < job->bo_count; i++) {
++                      if (!job->mappings[i])
++                              break;
++
++                      atomic_dec(&job->mappings[i]->obj->gpu_usecount);
+                       panfrost_gem_mapping_put(job->mappings[i]);
++              }
+               kvfree(job->mappings);
+       }
+ 
+diff --git a/drivers/gpu/drm/sun4i/sun4i_drv.c b/drivers/gpu/drm/sun4i/sun4i_drv.c
+index a5757b11b730..5b54eff12cc0 100644
+--- a/drivers/gpu/drm/sun4i/sun4i_drv.c
++++ b/drivers/gpu/drm/sun4i/sun4i_drv.c
+@@ -85,7 +85,6 @@ static int sun4i_drv_bind(struct device *dev)
+       }
+ 
+       drm_mode_config_init(drm);
+-      drm->mode_config.allow_fb_modifiers = true;
+ 
+       ret = component_bind_all(drm->dev, drm);
+       if (ret) {
+diff --git a/drivers/gpu/drm/vgem/vgem_drv.c b/drivers/gpu/drm/vgem/vgem_drv.c
+index 5bd60ded3d81..909eba43664a 100644
+--- a/drivers/gpu/drm/vgem/vgem_drv.c
++++ b/drivers/gpu/drm/vgem/vgem_drv.c
+@@ -196,9 +196,10 @@ static struct drm_gem_object *vgem_gem_create(struct drm_device *dev,
+               return ERR_CAST(obj);
+ 
+       ret = drm_gem_handle_create(file, &obj->base, handle);
+-      drm_gem_object_put_unlocked(&obj->base);
+-      if (ret)
++      if (ret) {
++              drm_gem_object_put_unlocked(&obj->base);
+               return ERR_PTR(ret);
++      }
+ 
+       return &obj->base;
+ }
+@@ -221,7 +222,9 @@ static int vgem_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
+       args->size = gem_object->size;
+       args->pitch = pitch;
+ 
+-      DRM_DEBUG("Created object of size %lld\n", size);
++      drm_gem_object_put_unlocked(gem_object);
++
++      DRM_DEBUG("Created object of size %llu\n", args->size);
+ 
+       return 0;
+ }
+diff --git a/drivers/hwmon/pmbus/ltc2978.c b/drivers/hwmon/pmbus/ltc2978.c
+index f01f4887fb2e..a91ed01abb68 100644
+--- a/drivers/hwmon/pmbus/ltc2978.c
++++ b/drivers/hwmon/pmbus/ltc2978.c
+@@ -82,8 +82,8 @@ enum chips { ltc2974, ltc2975, ltc2977, ltc2978, ltc2980, ltc3880, ltc3882,
+ 
+ #define LTC_POLL_TIMEOUT              100     /* in milli-seconds */
+ 
+-#define LTC_NOT_BUSY                  BIT(5)
+-#define LTC_NOT_PENDING                       BIT(4)
++#define LTC_NOT_BUSY                  BIT(6)
++#define LTC_NOT_PENDING                       BIT(5)
+ 
+ /*
+  * LTC2978 clears peak data whenever the CLEAR_FAULTS command is executed, which
+diff --git a/drivers/infiniband/core/security.c b/drivers/infiniband/core/security.c
+index 6eb6d2717ca5..2b4d80393bd0 100644
+--- a/drivers/infiniband/core/security.c
++++ b/drivers/infiniband/core/security.c
+@@ -339,22 +339,16 @@ static struct ib_ports_pkeys *get_new_pps(const struct ib_qp *qp,
+       if (!new_pps)
+               return NULL;
+ 
+-      if (qp_attr_mask & (IB_QP_PKEY_INDEX | IB_QP_PORT)) {
+-              if (!qp_pps) {
+-                      new_pps->main.port_num = qp_attr->port_num;
+-                      new_pps->main.pkey_index = qp_attr->pkey_index;
+-              } else {
+-                      new_pps->main.port_num = (qp_attr_mask & IB_QP_PORT) ?
+-                                                qp_attr->port_num :
+-                                                qp_pps->main.port_num;
+-
+-                      new_pps->main.pkey_index =
+-                                      (qp_attr_mask & IB_QP_PKEY_INDEX) ?
+-                                       qp_attr->pkey_index :
+-                                       qp_pps->main.pkey_index;
+-              }
++      if (qp_attr_mask & IB_QP_PORT)
++              new_pps->main.port_num =
++                      (qp_pps) ? qp_pps->main.port_num : qp_attr->port_num;
++      if (qp_attr_mask & IB_QP_PKEY_INDEX)
++              new_pps->main.pkey_index = (qp_pps) ? qp_pps->main.pkey_index :
++                                                    qp_attr->pkey_index;
++      if ((qp_attr_mask & IB_QP_PKEY_INDEX) && (qp_attr_mask & IB_QP_PORT))
+               new_pps->main.state = IB_PORT_PKEY_VALID;
+-      } else if (qp_pps) {
++
++      if (!(qp_attr_mask & (IB_QP_PKEY_INDEX || IB_QP_PORT)) && qp_pps) {
+               new_pps->main.port_num = qp_pps->main.port_num;
+               new_pps->main.pkey_index = qp_pps->main.pkey_index;
+               if (qp_pps->main.state != IB_PORT_PKEY_NOT_VALID)
+diff --git a/drivers/infiniband/core/user_mad.c b/drivers/infiniband/core/user_mad.c
+index d1407fa378e8..1235ffb2389b 100644
+--- a/drivers/infiniband/core/user_mad.c
++++ b/drivers/infiniband/core/user_mad.c
+@@ -1312,6 +1312,9 @@ static void ib_umad_kill_port(struct ib_umad_port *port)
+       struct ib_umad_file *file;
+       int id;
+ 
++      cdev_device_del(&port->sm_cdev, &port->sm_dev);
++      cdev_device_del(&port->cdev, &port->dev);
++
+       mutex_lock(&port->file_mutex);
+ 
+       /* Mark ib_dev NULL and block ioctl or other file ops to progress
+@@ -1331,8 +1334,6 @@ static void ib_umad_kill_port(struct ib_umad_port *port)
+ 
+       mutex_unlock(&port->file_mutex);
+ 
+-      cdev_device_del(&port->sm_cdev, &port->sm_dev);
+-      cdev_device_del(&port->cdev, &port->dev);
+       ida_free(&umad_ida, port->dev_num);
+ 
+       /* balances device_initialize() */
+diff --git a/drivers/infiniband/core/uverbs_cmd.c b/drivers/infiniband/core/uverbs_cmd.c
+index 06ed32c8662f..86e93ac46d75 100644
+--- a/drivers/infiniband/core/uverbs_cmd.c
++++ b/drivers/infiniband/core/uverbs_cmd.c
+@@ -2720,12 +2720,6 @@ static int kern_spec_to_ib_spec_action(struct uverbs_attr_bundle *attrs,
+       return 0;
+ }
+ 
+-static size_t kern_spec_filter_sz(const struct ib_uverbs_flow_spec_hdr *spec)
+-{
+-      /* Returns user space filter size, includes padding */
+-      return (spec->size - sizeof(struct ib_uverbs_flow_spec_hdr)) / 2;
+-}
+-
+ static ssize_t spec_filter_size(const void *kern_spec_filter, u16 kern_filter_size,
+                               u16 ib_real_filter_sz)
+ {
+@@ -2869,11 +2863,16 @@ int ib_uverbs_kern_spec_to_ib_spec_filter(enum ib_flow_spec_type type,
+ static int kern_spec_to_ib_spec_filter(struct ib_uverbs_flow_spec *kern_spec,
+                                      union ib_flow_spec *ib_spec)
+ {
+-      ssize_t kern_filter_sz;
++      size_t kern_filter_sz;
+       void *kern_spec_mask;
+       void *kern_spec_val;
+ 
+-      kern_filter_sz = kern_spec_filter_sz(&kern_spec->hdr);
++      if (check_sub_overflow((size_t)kern_spec->hdr.size,
++                             sizeof(struct ib_uverbs_flow_spec_hdr),
++                             &kern_filter_sz))
++              return -EINVAL;
++
++      kern_filter_sz /= 2;
+ 
+       kern_spec_val = (void *)kern_spec +
+               sizeof(struct ib_uverbs_flow_spec_hdr);
+diff --git a/drivers/infiniband/hw/cxgb4/cm.c b/drivers/infiniband/hw/cxgb4/cm.c
+index ee1182f9b627..d69dece3b1d5 100644
+--- a/drivers/infiniband/hw/cxgb4/cm.c
++++ b/drivers/infiniband/hw/cxgb4/cm.c
+@@ -3036,6 +3036,10 @@ static int terminate(struct c4iw_dev *dev, struct sk_buff *skb)
+                                      C4IW_QP_ATTR_NEXT_STATE, &attrs, 1);
+               }
+ 
++              /* As per draft-hilland-iwarp-verbs-v1.0, sec 6.2.3,
++               * when entering the TERM state the RNIC MUST initiate a CLOSE.
++               */
++              c4iw_ep_disconnect(ep, 1, GFP_KERNEL);
+               c4iw_put_ep(&ep->com);
+       } else
+               pr_warn("TERM received tid %u no ep/qp\n", tid);
+diff --git a/drivers/infiniband/hw/cxgb4/qp.c b/drivers/infiniband/hw/cxgb4/qp.c
+index bbcac539777a..89ac2f9ae6dd 100644
+--- a/drivers/infiniband/hw/cxgb4/qp.c
++++ b/drivers/infiniband/hw/cxgb4/qp.c
+@@ -1948,10 +1948,10 @@ int c4iw_modify_qp(struct c4iw_dev *rhp, struct c4iw_qp *qhp,
+                       qhp->attr.layer_etype = attrs->layer_etype;
+                       qhp->attr.ecode = attrs->ecode;
+                       ep = qhp->ep;
+-                      c4iw_get_ep(&ep->com);
+-                      disconnect = 1;
+                       if (!internal) {
++                              c4iw_get_ep(&ep->com);
+                               terminate = 1;
++                              disconnect = 1;
+                       } else {
+                               terminate = qhp->attr.send_term;
+                               ret = rdma_fini(rhp, qhp, ep);
+diff --git a/drivers/infiniband/hw/hfi1/affinity.c b/drivers/infiniband/hw/hfi1/affinity.c
+index c142b23bb401..1aeea5d65c01 100644
+--- a/drivers/infiniband/hw/hfi1/affinity.c
++++ b/drivers/infiniband/hw/hfi1/affinity.c
+@@ -479,6 +479,8 @@ static int _dev_comp_vect_mappings_create(struct hfi1_devdata *dd,
+                         rvt_get_ibdev_name(&(dd)->verbs_dev.rdi), i, cpu);
+       }
+ 
++      free_cpumask_var(available_cpus);
++      free_cpumask_var(non_intr_cpus);
+       return 0;
+ 
+ fail:
+diff --git a/drivers/infiniband/hw/hfi1/file_ops.c b/drivers/infiniband/hw/hfi1/file_ops.c
+index 7c5e3fb22413..b7bb55b57889 100644
+--- a/drivers/infiniband/hw/hfi1/file_ops.c
++++ b/drivers/infiniband/hw/hfi1/file_ops.c
+@@ -200,23 +200,24 @@ static int hfi1_file_open(struct inode *inode, struct file *fp)
+ 
+       fd = kzalloc(sizeof(*fd), GFP_KERNEL);
+ 
+-      if (fd) {
+-              fd->rec_cpu_num = -1; /* no cpu affinity by default */
+-              fd->mm = current->mm;
+-              mmgrab(fd->mm);
+-              fd->dd = dd;
+-              kobject_get(&fd->dd->kobj);
+-              fp->private_data = fd;
+-      } else {
+-              fp->private_data = NULL;
+-
+-              if (atomic_dec_and_test(&dd->user_refcount))
+-                      complete(&dd->user_comp);
+-
+-              return -ENOMEM;
+-      }
+-
++      if (!fd || init_srcu_struct(&fd->pq_srcu))
++              goto nomem;
++      spin_lock_init(&fd->pq_rcu_lock);
++      spin_lock_init(&fd->tid_lock);
++      spin_lock_init(&fd->invalid_lock);
++      fd->rec_cpu_num = -1; /* no cpu affinity by default */
++      fd->mm = current->mm;
++      mmgrab(fd->mm);
++      fd->dd = dd;
++      kobject_get(&fd->dd->kobj);
++      fp->private_data = fd;
+       return 0;
++nomem:
++      kfree(fd);
++      fp->private_data = NULL;
++      if (atomic_dec_and_test(&dd->user_refcount))
++              complete(&dd->user_comp);
++      return -ENOMEM;
+ }
+ 
+ static long hfi1_file_ioctl(struct file *fp, unsigned int cmd,
+@@ -301,21 +302,30 @@ static long hfi1_file_ioctl(struct file *fp, unsigned int cmd,
+ static ssize_t hfi1_write_iter(struct kiocb *kiocb, struct iov_iter *from)
+ {
+       struct hfi1_filedata *fd = kiocb->ki_filp->private_data;
+-      struct hfi1_user_sdma_pkt_q *pq = fd->pq;
++      struct hfi1_user_sdma_pkt_q *pq;
+       struct hfi1_user_sdma_comp_q *cq = fd->cq;
+       int done = 0, reqs = 0;
+       unsigned long dim = from->nr_segs;
++      int idx;
+ 
+-      if (!cq || !pq)
++      idx = srcu_read_lock(&fd->pq_srcu);
++      pq = srcu_dereference(fd->pq, &fd->pq_srcu);
++      if (!cq || !pq) {
++              srcu_read_unlock(&fd->pq_srcu, idx);
+               return -EIO;
++      }
+ 
+-      if (!iter_is_iovec(from) || !dim)
++      if (!iter_is_iovec(from) || !dim) {
++              srcu_read_unlock(&fd->pq_srcu, idx);
+               return -EINVAL;
++      }
+ 
+       trace_hfi1_sdma_request(fd->dd, fd->uctxt->ctxt, fd->subctxt, dim);
+ 
+-      if (atomic_read(&pq->n_reqs) == pq->n_max_reqs)
++      if (atomic_read(&pq->n_reqs) == pq->n_max_reqs) {
++              srcu_read_unlock(&fd->pq_srcu, idx);
+               return -ENOSPC;
++      }
+ 
+       while (dim) {
+               int ret;
+@@ -333,6 +343,7 @@ static ssize_t hfi1_write_iter(struct kiocb *kiocb, struct iov_iter *from)
+               reqs++;
+       }
+ 
++      srcu_read_unlock(&fd->pq_srcu, idx);
+       return reqs;
+ }
+ 
+@@ -707,6 +718,7 @@ done:
+       if (atomic_dec_and_test(&dd->user_refcount))
+               complete(&dd->user_comp);
+ 
++      cleanup_srcu_struct(&fdata->pq_srcu);
+       kfree(fdata);
+       return 0;
+ }
+diff --git a/drivers/infiniband/hw/hfi1/hfi.h b/drivers/infiniband/hw/hfi1/hfi.h
+index fc10d65fc3e1..27dea5e1e201 100644
+--- a/drivers/infiniband/hw/hfi1/hfi.h
++++ b/drivers/infiniband/hw/hfi1/hfi.h
+@@ -1436,10 +1436,13 @@ struct mmu_rb_handler;
+ 
+ /* Private data for file operations */
+ struct hfi1_filedata {
++      struct srcu_struct pq_srcu;
+       struct hfi1_devdata *dd;
+       struct hfi1_ctxtdata *uctxt;
+       struct hfi1_user_sdma_comp_q *cq;
+-      struct hfi1_user_sdma_pkt_q *pq;
++      /* update side lock for SRCU */
++      spinlock_t pq_rcu_lock;
++      struct hfi1_user_sdma_pkt_q __rcu *pq;
+       u16 subctxt;
+       /* for cpu affinity; -1 if none */
+       int rec_cpu_num;
+diff --git a/drivers/infiniband/hw/hfi1/user_exp_rcv.c b/drivers/infiniband/hw/hfi1/user_exp_rcv.c
+index f05742ac0949..4da03f823474 100644
+--- a/drivers/infiniband/hw/hfi1/user_exp_rcv.c
++++ b/drivers/infiniband/hw/hfi1/user_exp_rcv.c
+@@ -87,9 +87,6 @@ int hfi1_user_exp_rcv_init(struct hfi1_filedata *fd,
+ {
+       int ret = 0;
+ 
+-      spin_lock_init(&fd->tid_lock);
+-      spin_lock_init(&fd->invalid_lock);
+-
+       fd->entry_to_rb = kcalloc(uctxt->expected_count,
+                                 sizeof(struct rb_node *),
+                                 GFP_KERNEL);
+@@ -142,10 +139,12 @@ void hfi1_user_exp_rcv_free(struct hfi1_filedata *fd)
+ {
+       struct hfi1_ctxtdata *uctxt = fd->uctxt;
+ 
++      mutex_lock(&uctxt->exp_mutex);
+       if (!EXP_TID_SET_EMPTY(uctxt->tid_full_list))
+               unlock_exp_tids(uctxt, &uctxt->tid_full_list, fd);
+       if (!EXP_TID_SET_EMPTY(uctxt->tid_used_list))
+               unlock_exp_tids(uctxt, &uctxt->tid_used_list, fd);
++      mutex_unlock(&uctxt->exp_mutex);
+ 
+       kfree(fd->invalid_tids);
+       fd->invalid_tids = NULL;
+diff --git a/drivers/infiniband/hw/hfi1/user_sdma.c b/drivers/infiniband/hw/hfi1/user_sdma.c
+index fd754a16475a..c2f0d9ba93de 100644
+--- a/drivers/infiniband/hw/hfi1/user_sdma.c
++++ b/drivers/infiniband/hw/hfi1/user_sdma.c
+@@ -179,7 +179,6 @@ int hfi1_user_sdma_alloc_queues(struct hfi1_ctxtdata *uctxt,
+       pq = kzalloc(sizeof(*pq), GFP_KERNEL);
+       if (!pq)
+               return -ENOMEM;
+-
+       pq->dd = dd;
+       pq->ctxt = uctxt->ctxt;
+       pq->subctxt = fd->subctxt;
+@@ -236,7 +235,7 @@ int hfi1_user_sdma_alloc_queues(struct hfi1_ctxtdata *uctxt,
+               goto pq_mmu_fail;
+       }
+ 
+-      fd->pq = pq;
++      rcu_assign_pointer(fd->pq, pq);
+       fd->cq = cq;
+ 
+       return 0;
+@@ -264,8 +263,14 @@ int hfi1_user_sdma_free_queues(struct hfi1_filedata *fd,
+ 
+       trace_hfi1_sdma_user_free_queues(uctxt->dd, uctxt->ctxt, fd->subctxt);
+ 
+-      pq = fd->pq;
++      spin_lock(&fd->pq_rcu_lock);
++      pq = srcu_dereference_check(fd->pq, &fd->pq_srcu,
++                                  lockdep_is_held(&fd->pq_rcu_lock));
+       if (pq) {
++              rcu_assign_pointer(fd->pq, NULL);
++              spin_unlock(&fd->pq_rcu_lock);
++              synchronize_srcu(&fd->pq_srcu);
++              /* at this point there can be no more new requests */
+               if (pq->handler)
+                       hfi1_mmu_rb_unregister(pq->handler);
+               iowait_sdma_drain(&pq->busy);
+@@ -277,7 +282,8 @@ int hfi1_user_sdma_free_queues(struct hfi1_filedata *fd,
+               kfree(pq->req_in_use);
+               kmem_cache_destroy(pq->txreq_cache);
+               kfree(pq);
+-              fd->pq = NULL;
++      } else {
++              spin_unlock(&fd->pq_rcu_lock);
+       }
+       if (fd->cq) {
+               vfree(fd->cq->comps);
+@@ -321,7 +327,8 @@ int hfi1_user_sdma_process_request(struct hfi1_filedata *fd,
+ {
+       int ret = 0, i;
+       struct hfi1_ctxtdata *uctxt = fd->uctxt;
+-      struct hfi1_user_sdma_pkt_q *pq = fd->pq;
++      struct hfi1_user_sdma_pkt_q *pq =
++              srcu_dereference(fd->pq, &fd->pq_srcu);
+       struct hfi1_user_sdma_comp_q *cq = fd->cq;
+       struct hfi1_devdata *dd = pq->dd;
+       unsigned long idx = 0;
+diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c
+index 7e51870e9e01..89ba2f6cd815 100644
+--- a/drivers/infiniband/hw/mlx5/qp.c
++++ b/drivers/infiniband/hw/mlx5/qp.c
+@@ -3394,9 +3394,6 @@ static int __mlx5_ib_qp_set_counter(struct ib_qp *qp,
+       struct mlx5_ib_qp_base *base;
+       u32 set_id;
+ 
+-      if (!MLX5_CAP_GEN(dev->mdev, rts2rts_qp_counters_set_id))
+-              return 0;
+-
+       if (counter)
+               set_id = counter->id;
+       else
+@@ -6529,6 +6526,7 @@ void mlx5_ib_drain_rq(struct ib_qp *qp)
+  */
+ int mlx5_ib_qp_set_counter(struct ib_qp *qp, struct rdma_counter *counter)
+ {
++      struct mlx5_ib_dev *dev = to_mdev(qp->device);
+       struct mlx5_ib_qp *mqp = to_mqp(qp);
+       int err = 0;
+ 
+@@ -6538,6 +6536,11 @@ int mlx5_ib_qp_set_counter(struct ib_qp *qp, struct rdma_counter *counter)
+               goto out;
+       }
+ 
++      if (!MLX5_CAP_GEN(dev->mdev, rts2rts_qp_counters_set_id)) {
++              err = -EOPNOTSUPP;
++              goto out;
++      }
++
+       if (mqp->state == IB_QPS_RTS) {
+               err = __mlx5_ib_qp_set_counter(qp, counter);
+               if (!err)
+diff --git a/drivers/infiniband/sw/rdmavt/qp.c b/drivers/infiniband/sw/rdmavt/qp.c
+index 3cdf75d0c7a4..7858d499db03 100644
+--- a/drivers/infiniband/sw/rdmavt/qp.c
++++ b/drivers/infiniband/sw/rdmavt/qp.c
+@@ -61,6 +61,8 @@
+ #define RVT_RWQ_COUNT_THRESHOLD 16
+ 
+ static void rvt_rc_timeout(struct timer_list *t);
++static void rvt_reset_qp(struct rvt_dev_info *rdi, struct rvt_qp *qp,
++                       enum ib_qp_type type);
+ 
+ /*
+  * Convert the AETH RNR timeout code into the number of microseconds.
+@@ -452,40 +454,41 @@ no_qp_table:
+ }
+ 
+ /**
+- * free_all_qps - check for QPs still in use
++ * rvt_free_qp_cb - callback function to reset a qp
++ * @qp: the qp to reset
++ * @v: a 64-bit value
++ *
++ * This function resets the qp and removes it from the
++ * qp hash table.
++ */
++static void rvt_free_qp_cb(struct rvt_qp *qp, u64 v)
++{
++      unsigned int *qp_inuse = (unsigned int *)v;
++      struct rvt_dev_info *rdi = ib_to_rvt(qp->ibqp.device);
++
++      /* Reset the qp and remove it from the qp hash list */
++      rvt_reset_qp(rdi, qp, qp->ibqp.qp_type);
++
++      /* Increment the qp_inuse count */
++      (*qp_inuse)++;
++}
++
++/**
++ * rvt_free_all_qps - check for QPs still in use
+  * @rdi: rvt device info structure
+  *
+  * There should not be any QPs still in use.
+  * Free memory for table.
++ * Return the number of QPs still in use.
+  */
+ static unsigned rvt_free_all_qps(struct rvt_dev_info *rdi)
+ {
+-      unsigned long flags;
+-      struct rvt_qp *qp;
+-      unsigned n, qp_inuse = 0;
+-      spinlock_t *ql; /* work around too long line below */
+-
+-      if (rdi->driver_f.free_all_qps)
+-              qp_inuse = rdi->driver_f.free_all_qps(rdi);
++      unsigned int qp_inuse = 0;
+ 
+       qp_inuse += rvt_mcast_tree_empty(rdi);
+ 
+-      if (!rdi->qp_dev)
+-              return qp_inuse;
+-
+-      ql = &rdi->qp_dev->qpt_lock;
+-      spin_lock_irqsave(ql, flags);
+-      for (n = 0; n < rdi->qp_dev->qp_table_size; n++) {
+-              qp = rcu_dereference_protected(rdi->qp_dev->qp_table[n],
+-                                             lockdep_is_held(ql));
+-              RCU_INIT_POINTER(rdi->qp_dev->qp_table[n], NULL);
++      rvt_qp_iter(rdi, (u64)&qp_inuse, rvt_free_qp_cb);
+ 
+-              for (; qp; qp = rcu_dereference_protected(qp->next,
+-                                                        lockdep_is_held(ql)))
+-                      qp_inuse++;
+-      }
+-      spin_unlock_irqrestore(ql, flags);
+-      synchronize_rcu();
+       return qp_inuse;
+ }
+ 
+@@ -902,14 +905,14 @@ static void rvt_init_qp(struct rvt_dev_info *rdi, struct rvt_qp *qp,
+ }
+ 
+ /**
+- * rvt_reset_qp - initialize the QP state to the reset state
++ * _rvt_reset_qp - initialize the QP state to the reset state
+  * @qp: the QP to reset
+  * @type: the QP type
+  *
+  * r_lock, s_hlock, and s_lock are required to be held by the caller
+  */
+-static void rvt_reset_qp(struct rvt_dev_info *rdi, struct rvt_qp *qp,
+-                       enum ib_qp_type type)
++static void _rvt_reset_qp(struct rvt_dev_info *rdi, struct rvt_qp *qp,
++                        enum ib_qp_type type)
+       __must_hold(&qp->s_lock)
+       __must_hold(&qp->s_hlock)
+       __must_hold(&qp->r_lock)
+@@ -955,6 +958,27 @@ static void rvt_reset_qp(struct rvt_dev_info *rdi, struct rvt_qp *qp,
+       lockdep_assert_held(&qp->s_lock);
+ }
+ 
++/**
++ * rvt_reset_qp - initialize the QP state to the reset state
++ * @rdi: the device info
++ * @qp: the QP to reset
++ * @type: the QP type
++ *
++ * This is the wrapper function to acquire the r_lock, s_hlock, and s_lock
++ * before calling _rvt_reset_qp().
++ */
++static void rvt_reset_qp(struct rvt_dev_info *rdi, struct rvt_qp *qp,
++                       enum ib_qp_type type)
++{
++      spin_lock_irq(&qp->r_lock);
++      spin_lock(&qp->s_hlock);
++      spin_lock(&qp->s_lock);
++      _rvt_reset_qp(rdi, qp, type);
++      spin_unlock(&qp->s_lock);
++      spin_unlock(&qp->s_hlock);
++      spin_unlock_irq(&qp->r_lock);
++}
++
+ /** rvt_free_qpn - Free a qpn from the bit map
+  * @qpt: QP table
+  * @qpn: queue pair number to free
+@@ -1546,7 +1570,7 @@ int rvt_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
+       switch (new_state) {
+       case IB_QPS_RESET:
+               if (qp->state != IB_QPS_RESET)
+-                      rvt_reset_qp(rdi, qp, ibqp->qp_type);
++                      _rvt_reset_qp(rdi, qp, ibqp->qp_type);
+               break;
+ 
+       case IB_QPS_RTR:
+@@ -1695,13 +1719,7 @@ int rvt_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata)
+       struct rvt_qp *qp = ibqp_to_rvtqp(ibqp);
+       struct rvt_dev_info *rdi = ib_to_rvt(ibqp->device);
+ 
+-      spin_lock_irq(&qp->r_lock);
+-      spin_lock(&qp->s_hlock);
+-      spin_lock(&qp->s_lock);
+       rvt_reset_qp(rdi, qp, ibqp->qp_type);
+-      spin_unlock(&qp->s_lock);
+-      spin_unlock(&qp->s_hlock);
+-      spin_unlock_irq(&qp->r_lock);
+ 
+       wait_event(qp->wait, !atomic_read(&qp->refcount));
+       /* qpn is now available for use again */
+diff --git a/drivers/infiniband/sw/rxe/rxe_comp.c b/drivers/infiniband/sw/rxe/rxe_comp.c
+index 116cafc9afcf..4bc88708b355 100644
+--- a/drivers/infiniband/sw/rxe/rxe_comp.c
++++ b/drivers/infiniband/sw/rxe/rxe_comp.c
+@@ -329,7 +329,7 @@ static inline enum comp_state check_ack(struct rxe_qp *qp,
+                                       qp->comp.psn = pkt->psn;
+                                       if (qp->req.wait_psn) {
+                                               qp->req.wait_psn = 0;
+-                                              rxe_run_task(&qp->req.task, 1);
++                                              rxe_run_task(&qp->req.task, 0);
+                                       }
+                               }
+                               return COMPST_ERROR_RETRY;
+@@ -463,7 +463,7 @@ static void do_complete(struct rxe_qp *qp, struct rxe_send_wqe *wqe)
+        */
+       if (qp->req.wait_fence) {
+               qp->req.wait_fence = 0;
+-              rxe_run_task(&qp->req.task, 1);
++              rxe_run_task(&qp->req.task, 0);
+       }
+ }
+ 
+@@ -479,7 +479,7 @@ static inline enum comp_state complete_ack(struct rxe_qp *qp,
+               if (qp->req.need_rd_atomic) {
+                       qp->comp.timeout_retry = 0;
+                       qp->req.need_rd_atomic = 0;
+-                      rxe_run_task(&qp->req.task, 1);
++                      rxe_run_task(&qp->req.task, 0);
+               }
+       }
+ 
+@@ -725,7 +725,7 @@ int rxe_completer(void *arg)
+                                                       RXE_CNT_COMP_RETRY);
+                                       qp->req.need_retry = 1;
+                                       qp->comp.started_retry = 1;
+-                                      rxe_run_task(&qp->req.task, 1);
++                                      rxe_run_task(&qp->req.task, 0);
+                               }
+ 
+                               if (pkt) {
+diff --git a/drivers/input/mouse/synaptics.c b/drivers/input/mouse/synaptics.c
+index 1ae6f8bba9ae..2c666fb34625 100644
+--- a/drivers/input/mouse/synaptics.c
++++ b/drivers/input/mouse/synaptics.c
+@@ -146,7 +146,6 @@ static const char * const topbuttonpad_pnp_ids[] = {
+       "LEN0042", /* Yoga */
+       "LEN0045",
+       "LEN0047",
+-      "LEN0049",
+       "LEN2000", /* S540 */
+       "LEN2001", /* Edge E431 */
+       "LEN2002", /* Edge E531 */
+@@ -166,9 +165,11 @@ static const char * const smbus_pnp_ids[] = {
+       /* all of the topbuttonpad_pnp_ids are valid, we just add some extras */
+       "LEN0048", /* X1 Carbon 3 */
+       "LEN0046", /* X250 */
++      "LEN0049", /* Yoga 11e */
+       "LEN004a", /* W541 */
+       "LEN005b", /* P50 */
+       "LEN005e", /* T560 */
++      "LEN006c", /* T470s */
+       "LEN0071", /* T480 */
+       "LEN0072", /* X1 Carbon Gen 5 (2017) - Elan/ALPS trackpoint */
+       "LEN0073", /* X1 Carbon G5 (Elantech) */
+@@ -179,6 +180,7 @@ static const char * const smbus_pnp_ids[] = {
+       "LEN0097", /* X280 -> ALPS trackpoint */
+       "LEN009b", /* T580 */
+       "LEN200f", /* T450s */
++      "LEN2044", /* L470  */
+       "LEN2054", /* E480 */
+       "LEN2055", /* E580 */
+       "SYN3052", /* HP EliteBook 840 G4 */
+diff --git a/drivers/input/touchscreen/ili210x.c b/drivers/input/touchscreen/ili210x.c
+index 4a17096e83e1..84bf51d79888 100644
+--- a/drivers/input/touchscreen/ili210x.c
++++ b/drivers/input/touchscreen/ili210x.c
+@@ -321,7 +321,7 @@ static umode_t ili210x_calibrate_visible(struct kobject *kobj,
+       struct i2c_client *client = to_i2c_client(dev);
+       struct ili210x *priv = i2c_get_clientdata(client);
+ 
+-      return priv->chip->has_calibrate_reg;
++      return priv->chip->has_calibrate_reg ? attr->mode : 0;
+ }
+ 
+ static const struct attribute_group ili210x_attr_group = {
+diff --git a/drivers/mmc/core/host.c b/drivers/mmc/core/host.c
+index 105b7a7c0251..b3484def0a8b 100644
+--- a/drivers/mmc/core/host.c
++++ b/drivers/mmc/core/host.c
+@@ -176,7 +176,6 @@ int mmc_of_parse(struct mmc_host *host)
+       u32 bus_width, drv_type, cd_debounce_delay_ms;
+       int ret;
+       bool cd_cap_invert, cd_gpio_invert = false;
+-      bool ro_cap_invert, ro_gpio_invert = false;
+ 
+       if (!dev || !dev_fwnode(dev))
+               return 0;
+@@ -255,9 +254,11 @@ int mmc_of_parse(struct mmc_host *host)
+       }
+ 
+       /* Parse Write Protection */
+-      ro_cap_invert = device_property_read_bool(dev, "wp-inverted");
+ 
+-      ret = mmc_gpiod_request_ro(host, "wp", 0, 0, &ro_gpio_invert);
++      if (device_property_read_bool(dev, "wp-inverted"))
++              host->caps2 |= MMC_CAP2_RO_ACTIVE_HIGH;
++
++      ret = mmc_gpiod_request_ro(host, "wp", 0, 0, NULL);
+       if (!ret)
+               dev_info(host->parent, "Got WP GPIO\n");
+       else if (ret != -ENOENT && ret != -ENOSYS)
+@@ -266,10 +267,6 @@ int mmc_of_parse(struct mmc_host *host)
+       if (device_property_read_bool(dev, "disable-wp"))
+               host->caps2 |= MMC_CAP2_NO_WRITE_PROTECT;
+ 
+-      /* See the comment on CD inversion above */
+-      if (ro_cap_invert ^ ro_gpio_invert)
+-              host->caps2 |= MMC_CAP2_RO_ACTIVE_HIGH;
+-
+       if (device_property_read_bool(dev, "cap-sd-highspeed"))
+               host->caps |= MMC_CAP_SD_HIGHSPEED;
+       if (device_property_read_bool(dev, "cap-mmc-highspeed"))
+diff --git a/drivers/mmc/core/slot-gpio.c b/drivers/mmc/core/slot-gpio.c
+index da2596c5fa28..582ec3d720f6 100644
+--- a/drivers/mmc/core/slot-gpio.c
++++ b/drivers/mmc/core/slot-gpio.c
+@@ -241,6 +241,9 @@ int mmc_gpiod_request_ro(struct mmc_host *host, const char *con_id,
+                       return ret;
+       }
+ 
++      if (host->caps2 & MMC_CAP2_RO_ACTIVE_HIGH)
++              gpiod_toggle_active_low(desc);
++
+       if (gpio_invert)
+               *gpio_invert = !gpiod_is_active_low(desc);
+ 
+diff --git a/drivers/mmc/host/pxamci.c b/drivers/mmc/host/pxamci.c
+index 024acc1b0a2e..b2bbcb09a49e 100644
+--- a/drivers/mmc/host/pxamci.c
++++ b/drivers/mmc/host/pxamci.c
+@@ -740,16 +740,16 @@ static int pxamci_probe(struct platform_device *pdev)
+                       goto out;
+               }
+ 
++              if (!host->pdata->gpio_card_ro_invert)
++                      mmc->caps2 |= MMC_CAP2_RO_ACTIVE_HIGH;
++
+               ret = mmc_gpiod_request_ro(mmc, "wp", 0, 0, NULL);
+               if (ret && ret != -ENOENT) {
+                       dev_err(dev, "Failed requesting gpio_ro\n");
+                       goto out;
+               }
+-              if (!ret) {
++              if (!ret)
+                       host->use_ro_gpio = true;
+-                      mmc->caps2 |= host->pdata->gpio_card_ro_invert ?
+-                              0 : MMC_CAP2_RO_ACTIVE_HIGH;
+-              }
+ 
+               if (host->pdata->init)
+                       host->pdata->init(dev, pxamci_detect_irq, mmc);
+diff --git a/drivers/mmc/host/sdhci-esdhc-imx.c b/drivers/mmc/host/sdhci-esdhc-imx.c
+index 1c988d6a2433..dccb4df46512 100644
+--- a/drivers/mmc/host/sdhci-esdhc-imx.c
++++ b/drivers/mmc/host/sdhci-esdhc-imx.c
+@@ -1381,13 +1381,14 @@ static int sdhci_esdhc_imx_probe_nondt(struct platform_device *pdev,
+                               host->mmc->parent->platform_data);
+       /* write_protect */
+       if (boarddata->wp_type == ESDHC_WP_GPIO) {
++              host->mmc->caps2 |= MMC_CAP2_RO_ACTIVE_HIGH;
++
+               err = mmc_gpiod_request_ro(host->mmc, "wp", 0, 0, NULL);
+               if (err) {
+                       dev_err(mmc_dev(host->mmc),
+                               "failed to request write-protect gpio!\n");
+                       return err;
+               }
+-              host->mmc->caps2 |= MMC_CAP2_RO_ACTIVE_HIGH;
+       }
+ 
+       /* card_detect */
+diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
+index 5dc32b72e7fa..641c07347e8d 100644
+--- a/drivers/nvme/host/core.c
++++ b/drivers/nvme/host/core.c
+@@ -3867,7 +3867,7 @@ static void nvme_get_fw_slot_info(struct nvme_ctrl *ctrl)
+       if (!log)
+               return;
+ 
+-      if (nvme_get_log(ctrl, NVME_NSID_ALL, 0, NVME_LOG_FW_SLOT, log,
++      if (nvme_get_log(ctrl, NVME_NSID_ALL, NVME_LOG_FW_SLOT, 0, log,
+                       sizeof(*log), 0))
+               dev_warn(ctrl->device, "Get FW SLOT INFO log error\n");
+       kfree(log);
+diff --git a/drivers/s390/crypto/pkey_api.c b/drivers/s390/crypto/pkey_api.c
+index d78d77686d7b..cda75118ccdb 100644
+--- a/drivers/s390/crypto/pkey_api.c
++++ b/drivers/s390/crypto/pkey_api.c
+@@ -774,7 +774,7 @@ static long pkey_unlocked_ioctl(struct file *filp, unsigned int cmd,
+                       return -EFAULT;
+               rc = cca_sec2protkey(ksp.cardnr, ksp.domain,
+                                    ksp.seckey.seckey, ksp.protkey.protkey,
+-                                   NULL, &ksp.protkey.type);
++                                   &ksp.protkey.len, &ksp.protkey.type);
+               DEBUG_DBG("%s cca_sec2protkey()=%d\n", __func__, rc);
+               if (rc)
+                       break;
+diff --git a/drivers/spmi/spmi-pmic-arb.c b/drivers/spmi/spmi-pmic-arb.c
+index 97acc2ba2912..de844b412110 100644
+--- a/drivers/spmi/spmi-pmic-arb.c
++++ b/drivers/spmi/spmi-pmic-arb.c
+@@ -731,6 +731,7 @@ static int qpnpint_irq_domain_translate(struct irq_domain *d,
+       return 0;
+ }
+ 
++static struct lock_class_key qpnpint_irq_lock_class, qpnpint_irq_request_class;
+ 
+ static void qpnpint_irq_domain_map(struct spmi_pmic_arb *pmic_arb,
+                                  struct irq_domain *domain, unsigned int virq,
+@@ -746,6 +747,9 @@ static void qpnpint_irq_domain_map(struct spmi_pmic_arb *pmic_arb,
+       else
+               handler = handle_level_irq;
+ 
++
++      irq_set_lockdep_class(virq, &qpnpint_irq_lock_class,
++                            &qpnpint_irq_request_class);
+       irq_domain_set_info(domain, virq, hwirq, &pmic_arb_irqchip, pmic_arb,
+                           handler, NULL, NULL);
+ }
+diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
+index 0c3c6450d1df..c1e47db439e2 100644
+--- a/fs/btrfs/disk-io.c
++++ b/fs/btrfs/disk-io.c
+@@ -3164,6 +3164,7 @@ int __cold open_ctree(struct super_block *sb,
+       /* do not make disk changes in broken FS or nologreplay is given */
+       if (btrfs_super_log_root(disk_super) != 0 &&
+           !btrfs_test_opt(fs_info, NOLOGREPLAY)) {
++              btrfs_info(fs_info, "start tree-log replay");
+               ret = btrfs_replay_log(fs_info, fs_devices);
+               if (ret) {
+                       err = ret;
+diff --git a/fs/btrfs/extent_map.c b/fs/btrfs/extent_map.c
+index 6f417ff68980..bd6229fb2b6f 100644
+--- a/fs/btrfs/extent_map.c
++++ b/fs/btrfs/extent_map.c
+@@ -237,6 +237,17 @@ static void try_merge_map(struct extent_map_tree *tree, struct extent_map *em)
+       struct extent_map *merge = NULL;
+       struct rb_node *rb;
+ 
++      /*
++       * We can't modify an extent map that is in the tree and that is being
++       * used by another task, as it can cause that other task to see it in
++       * inconsistent state during the merging. We always have 1 reference for
++       * the tree and 1 for this task (which is unpinning the extent map or
++       * clearing the logging flag), so anything > 2 means it's being used by
++       * other tasks too.
++       */
++      if (refcount_read(&em->refs) > 2)
++              return;
++
+       if (em->start != 0) {
+               rb = rb_prev(&em->rb_node);
+               if (rb)
+diff --git a/fs/btrfs/ref-verify.c b/fs/btrfs/ref-verify.c
+index b57f3618e58e..454a1015d026 100644
+--- a/fs/btrfs/ref-verify.c
++++ b/fs/btrfs/ref-verify.c
+@@ -744,6 +744,7 @@ int btrfs_ref_tree_mod(struct btrfs_fs_info *fs_info,
+                */
+               be = add_block_entry(fs_info, bytenr, num_bytes, ref_root);
+               if (IS_ERR(be)) {
++                      kfree(ref);
+                       kfree(ra);
+                       ret = PTR_ERR(be);
+                       goto out;
+@@ -757,6 +758,8 @@ int btrfs_ref_tree_mod(struct btrfs_fs_info *fs_info,
+                      "re-allocated a block that still has references to it!");
+                       dump_block_entry(fs_info, be);
+                       dump_ref_action(fs_info, ra);
++                      kfree(ref);
++                      kfree(ra);
+                       goto out_unlock;
+               }
+ 
+@@ -819,6 +822,7 @@ int btrfs_ref_tree_mod(struct btrfs_fs_info *fs_info,
+ "dropping a ref for a existing root that doesn't have a ref on the block");
+                               dump_block_entry(fs_info, be);
+                               dump_ref_action(fs_info, ra);
++                              kfree(ref);
+                               kfree(ra);
+                               goto out_unlock;
+                       }
+@@ -834,6 +838,7 @@ int btrfs_ref_tree_mod(struct btrfs_fs_info *fs_info,
+ "attempting to add another ref for an existing ref on a tree block");
+                       dump_block_entry(fs_info, be);
+                       dump_ref_action(fs_info, ra);
++                      kfree(ref);
+                       kfree(ra);
+                       goto out_unlock;
+               }
+diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c
+index 6ccfef72d0e1..c6557d44907a 100644
+--- a/fs/btrfs/super.c
++++ b/fs/btrfs/super.c
+@@ -1803,6 +1803,8 @@ static int btrfs_remount(struct super_block *sb, int *flags, char *data)
+               }
+ 
+               if (btrfs_super_log_root(fs_info->super_copy) != 0) {
++                      btrfs_warn(fs_info,
++              "mount required to replay tree-log, cannot remount read-write");
+                       ret = -EINVAL;
+                       goto restore;
+               }
+diff --git a/fs/ceph/super.c b/fs/ceph/super.c
+index 29a795f975df..9b5536451528 100644
+--- a/fs/ceph/super.c
++++ b/fs/ceph/super.c
+@@ -1020,10 +1020,6 @@ static int ceph_get_tree(struct fs_context *fc)
+       if (!fc->source)
+               return invalf(fc, "ceph: No source");
+ 
+-#ifdef CONFIG_CEPH_FS_POSIX_ACL
+-      fc->sb_flags |= SB_POSIXACL;
+-#endif
+-
+       /* create client (which we may/may not use) */
+       fsc = create_fs_client(pctx->opts, pctx->copts);
+       pctx->opts = NULL;
+@@ -1141,6 +1137,10 @@ static int ceph_init_fs_context(struct fs_context *fc)
+       fsopt->max_readdir_bytes = CEPH_MAX_READDIR_BYTES_DEFAULT;
+       fsopt->congestion_kb = default_congestion_kb();
+ 
++#ifdef CONFIG_CEPH_FS_POSIX_ACL
++      fc->sb_flags |= SB_POSIXACL;
++#endif
++
+       fc->fs_private = pctx;
+       fc->ops = &ceph_context_ops;
+       return 0;
+diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c
+index 5492b9860baa..92b9c8221f07 100644
+--- a/fs/cifs/cifsfs.c
++++ b/fs/cifs/cifsfs.c
+@@ -414,7 +414,7 @@ cifs_show_security(struct seq_file *s, struct cifs_ses *ses)
+               seq_puts(s, "ntlm");
+               break;
+       case Kerberos:
+-              seq_printf(s, "krb5,cruid=%u", from_kuid_munged(&init_user_ns,ses->cred_uid));
++              seq_puts(s, "krb5");
+               break;
+       case RawNTLMSSP:
+               seq_puts(s, "ntlmssp");
+@@ -427,6 +427,10 @@ cifs_show_security(struct seq_file *s, struct cifs_ses *ses)
+ 
+       if (ses->sign)
+               seq_puts(s, "i");
++
++      if (ses->sectype == Kerberos)
++              seq_printf(s, ",cruid=%u",
++                         from_kuid_munged(&init_user_ns, ses->cred_uid));
+ }
+ 
+ static void
+diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c
+index 4b9c805ae5e1..65f76be0f454 100644
+--- a/fs/cifs/smb2ops.c
++++ b/fs/cifs/smb2ops.c
+@@ -1115,7 +1115,8 @@ smb2_set_ea(const unsigned int xid, struct cifs_tcon *tcon,
+       void *data[1];
+       struct smb2_file_full_ea_info *ea = NULL;
+       struct kvec close_iov[1];
+-      int rc;
++      struct smb2_query_info_rsp *rsp;
++      int rc, used_len = 0;
+ 
+       if (smb3_encryption_required(tcon))
+               flags |= CIFS_TRANSFORM_REQ;
+@@ -1138,6 +1139,38 @@ smb2_set_ea(const unsigned int xid, struct cifs_tcon *tcon,
+                                                            cifs_sb);
+                       if (rc == -ENODATA)
+                               goto sea_exit;
++              } else {
++                      /* If we are adding an attribute we should first check
++                       * if there will be enough space available to store
++                       * the new EA. If not we should not add it since we
++                       * would not be able to even read the EAs back.
++                       */
++                      rc = smb2_query_info_compound(xid, tcon, utf16_path,
++                                    FILE_READ_EA,
++                                    FILE_FULL_EA_INFORMATION,
++                                    SMB2_O_INFO_FILE,
++                                    CIFSMaxBufSize -
++                                    MAX_SMB2_CREATE_RESPONSE_SIZE -
++                                    MAX_SMB2_CLOSE_RESPONSE_SIZE,
++                                    &rsp_iov[1], &resp_buftype[1], cifs_sb);
++                      if (rc == 0) {
++                              rsp = (struct smb2_query_info_rsp *)rsp_iov[1].iov_base;
++                              used_len = le32_to_cpu(rsp->OutputBufferLength);
++                      }
++                      free_rsp_buf(resp_buftype[1], rsp_iov[1].iov_base);
++                      resp_buftype[1] = CIFS_NO_BUFFER;
++                      memset(&rsp_iov[1], 0, sizeof(rsp_iov[1]));
++                      rc = 0;
++
++                      /* Use a fudge factor of 256 bytes in case we collide
++                       * with a different set_EAs command.
++                       */
++                      if(CIFSMaxBufSize - MAX_SMB2_CREATE_RESPONSE_SIZE -
++                         MAX_SMB2_CLOSE_RESPONSE_SIZE - 256 <
++                         used_len + ea_name_len + ea_value_len + 1) {
++                              rc = -ENOSPC;
++                              goto sea_exit;
++                      }
+               }
+       }
+ 
+diff --git a/fs/ext4/block_validity.c b/fs/ext4/block_validity.c
+index 1ee04e76bbe0..0a734ffb4310 100644
+--- a/fs/ext4/block_validity.c
++++ b/fs/ext4/block_validity.c
+@@ -207,6 +207,7 @@ static int ext4_protect_reserved_inode(struct super_block *sb,
+               return PTR_ERR(inode);
+       num = (inode->i_size + sb->s_blocksize - 1) >> sb->s_blocksize_bits;
+       while (i < num) {
++              cond_resched();
+               map.m_lblk = i;
+               map.m_len = num - i;
+               n = ext4_map_blocks(NULL, inode, &map, 0);
+diff --git a/fs/ext4/dir.c b/fs/ext4/dir.c
+index 060f8a6a5da9..4dc2615ab289 100644
+--- a/fs/ext4/dir.c
++++ b/fs/ext4/dir.c
+@@ -129,12 +129,14 @@ static int ext4_readdir(struct file *file, struct dir_context *ctx)
+               if (err != ERR_BAD_DX_DIR) {
+                       return err;
+               }
+-              /*
+-               * We don't set the inode dirty flag since it's not
+-               * critical that it get flushed back to the disk.
+-               */
+-              ext4_clear_inode_flag(file_inode(file),
+-                                    EXT4_INODE_INDEX);
++              /* Can we just clear INDEX flag to ignore htree information? */
++              if (!ext4_has_metadata_csum(sb)) {
++                      /*
++                       * We don't set the inode dirty flag since it's not
++                       * critical that it gets flushed back to the disk.
++                       */
++                      ext4_clear_inode_flag(inode, EXT4_INODE_INDEX);
++              }
+       }
+ 
+       if (ext4_has_inline_data(inode)) {
+diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
+index f8578caba40d..1fd6c1e2ce2a 100644
+--- a/fs/ext4/ext4.h
++++ b/fs/ext4/ext4.h
+@@ -2482,8 +2482,11 @@ void ext4_insert_dentry(struct inode *inode,
+                       struct ext4_filename *fname);
+ static inline void ext4_update_dx_flag(struct inode *inode)
+ {
+-      if (!ext4_has_feature_dir_index(inode->i_sb))
++      if (!ext4_has_feature_dir_index(inode->i_sb)) {
++              /* ext4_iget() should have caught this... */
++              WARN_ON_ONCE(ext4_has_feature_metadata_csum(inode->i_sb));
+               ext4_clear_inode_flag(inode, EXT4_INODE_INDEX);
++      }
+ }
+ static const unsigned char ext4_filetype_table[] = {
+       DT_UNKNOWN, DT_REG, DT_DIR, DT_CHR, DT_BLK, DT_FIFO, DT_SOCK, DT_LNK
+diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
+index 629a25d999f0..25191201ccdc 100644
+--- a/fs/ext4/inode.c
++++ b/fs/ext4/inode.c
+@@ -4615,6 +4615,18 @@ struct inode *__ext4_iget(struct super_block *sb, unsigned long ino,
+               ret = -EFSCORRUPTED;
+               goto bad_inode;
+       }
++      /*
++       * If dir_index is not enabled but there's dir with INDEX flag set,
++       * we'd normally treat htree data as empty space. But with metadata
++       * checksumming that corrupts checksums so forbid that.
++       */
++      if (!ext4_has_feature_dir_index(sb) && ext4_has_metadata_csum(sb) &&
++          ext4_test_inode_flag(inode, EXT4_INODE_INDEX)) {
++              ext4_error_inode(inode, function, line, 0,
++                       "iget: Dir with htree data on filesystem without dir_index feature.");
++              ret = -EFSCORRUPTED;
++              goto bad_inode;
++      }
+       ei->i_disksize = inode->i_size;
+ #ifdef CONFIG_QUOTA
+       ei->i_reserved_quota = 0;
+diff --git a/fs/ext4/mmp.c b/fs/ext4/mmp.c
+index 2305b4374fd3..9d00e0dd2ba9 100644
+--- a/fs/ext4/mmp.c
++++ b/fs/ext4/mmp.c
+@@ -120,10 +120,10 @@ void __dump_mmp_msg(struct super_block *sb, struct mmp_struct *mmp,
+ {
+       __ext4_warning(sb, function, line, "%s", msg);
+       __ext4_warning(sb, function, line,
+-                     "MMP failure info: last update time: %llu, last update "
+-                     "node: %s, last update device: %s",
+-                     (long long unsigned int) le64_to_cpu(mmp->mmp_time),
+-                     mmp->mmp_nodename, mmp->mmp_bdevname);
++                     "MMP failure info: last update time: %llu, last update node: %.*s, last update device: %.*s",
++                     (unsigned long long)le64_to_cpu(mmp->mmp_time),
++                     (int)sizeof(mmp->mmp_nodename), mmp->mmp_nodename,
++                     (int)sizeof(mmp->mmp_bdevname), mmp->mmp_bdevname);
+ }
+ 
+ /*
+@@ -154,6 +154,7 @@ static int kmmpd(void *data)
+       mmp_check_interval = max(EXT4_MMP_CHECK_MULT * mmp_update_interval,
+                                EXT4_MMP_MIN_CHECK_INTERVAL);
+       mmp->mmp_check_interval = cpu_to_le16(mmp_check_interval);
++      BUILD_BUG_ON(sizeof(mmp->mmp_bdevname) < BDEVNAME_SIZE);
+       bdevname(bh->b_bdev, mmp->mmp_bdevname);
+ 
+       memcpy(mmp->mmp_nodename, init_utsname()->nodename,
+@@ -375,7 +376,8 @@ skip:
+       /*
+        * Start a kernel thread to update the MMP block periodically.
+        */
+-      EXT4_SB(sb)->s_mmp_tsk = kthread_run(kmmpd, mmpd_data, "kmmpd-%s",
++      EXT4_SB(sb)->s_mmp_tsk = kthread_run(kmmpd, mmpd_data, "kmmpd-%.*s",
++                                           (int)sizeof(mmp->mmp_bdevname),
+                                            bdevname(bh->b_bdev,
+                                                     mmp->mmp_bdevname));
+       if (IS_ERR(EXT4_SB(sb)->s_mmp_tsk)) {
+diff --git a/fs/ext4/namei.c b/fs/ext4/namei.c
+index 1cb42d940784..deb9f7a02976 100644
+--- a/fs/ext4/namei.c
++++ b/fs/ext4/namei.c
+@@ -2207,6 +2207,13 @@ static int ext4_add_entry(handle_t *handle, struct dentry *dentry,
+               retval = ext4_dx_add_entry(handle, &fname, dir, inode);
+               if (!retval || (retval != ERR_BAD_DX_DIR))
+                       goto out;
++              /* Can we just ignore htree data? */
++              if (ext4_has_metadata_csum(sb)) {
++                      EXT4_ERROR_INODE(dir,
++                              "Directory has corrupted htree index.");
++                      retval = -EFSCORRUPTED;
++                      goto out;
++              }
+               ext4_clear_inode_flag(dir, EXT4_INODE_INDEX);
+               dx_fallback++;
+               ext4_mark_inode_dirty(handle, dir);
+diff --git a/fs/ext4/super.c b/fs/ext4/super.c
+index 2937a8873fe1..c51d7ef2e467 100644
+--- a/fs/ext4/super.c
++++ b/fs/ext4/super.c
+@@ -2964,17 +2964,11 @@ static int ext4_feature_set_ok(struct super_block *sb, int readonly)
+               return 0;
+       }
+ 
+-#ifndef CONFIG_QUOTA
+-      if (ext4_has_feature_quota(sb) && !readonly) {
++#if !defined(CONFIG_QUOTA) || !defined(CONFIG_QFMT_V2)
++      if (!readonly && (ext4_has_feature_quota(sb) ||
++                        ext4_has_feature_project(sb))) {
+               ext4_msg(sb, KERN_ERR,
+-                       "Filesystem with quota feature cannot be mounted RDWR "
+-                       "without CONFIG_QUOTA");
+-              return 0;
+-      }
+-      if (ext4_has_feature_project(sb) && !readonly) {
+-              ext4_msg(sb, KERN_ERR,
+-                       "Filesystem with project quota feature cannot be mounted RDWR "
+-                       "without CONFIG_QUOTA");
++                       "The kernel was not built with CONFIG_QUOTA and CONFIG_QFMT_V2");
+               return 0;
+       }
+ #endif  /* CONFIG_QUOTA */
+@@ -3768,6 +3762,15 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
+        */
+       sbi->s_li_wait_mult = EXT4_DEF_LI_WAIT_MULT;
+ 
++      blocksize = BLOCK_SIZE << le32_to_cpu(es->s_log_block_size);
++      if (blocksize < EXT4_MIN_BLOCK_SIZE ||
++          blocksize > EXT4_MAX_BLOCK_SIZE) {
++              ext4_msg(sb, KERN_ERR,
++                     "Unsupported filesystem blocksize %d (%d log_block_size)",
++                       blocksize, le32_to_cpu(es->s_log_block_size));
++              goto failed_mount;
++      }
++
+       if (le32_to_cpu(es->s_rev_level) == EXT4_GOOD_OLD_REV) {
+               sbi->s_inode_size = EXT4_GOOD_OLD_INODE_SIZE;
+               sbi->s_first_ino = EXT4_GOOD_OLD_FIRST_INO;
+@@ -3785,6 +3788,7 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
+                       ext4_msg(sb, KERN_ERR,
+                              "unsupported inode size: %d",
+                              sbi->s_inode_size);
++                      ext4_msg(sb, KERN_ERR, "blocksize: %d", blocksize);
+                       goto failed_mount;
+               }
+               /*
+@@ -3988,14 +3992,6 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
+       if (!ext4_feature_set_ok(sb, (sb_rdonly(sb))))
+               goto failed_mount;
+ 
+-      blocksize = BLOCK_SIZE << le32_to_cpu(es->s_log_block_size);
+-      if (blocksize < EXT4_MIN_BLOCK_SIZE ||
+-          blocksize > EXT4_MAX_BLOCK_SIZE) {
+-              ext4_msg(sb, KERN_ERR,
+-                     "Unsupported filesystem blocksize %d (%d log_block_size)",
+-                       blocksize, le32_to_cpu(es->s_log_block_size));
+-              goto failed_mount;
+-      }
+       if (le32_to_cpu(es->s_log_block_size) >
+           (EXT4_MAX_BLOCK_LOG_SIZE - EXT4_MIN_BLOCK_LOG_SIZE)) {
+               ext4_msg(sb, KERN_ERR,
+@@ -5540,9 +5536,15 @@ static int ext4_statfs_project(struct super_block *sb,
+               return PTR_ERR(dquot);
+       spin_lock(&dquot->dq_dqb_lock);
+ 
+-      limit = (dquot->dq_dqb.dqb_bsoftlimit ?
+-               dquot->dq_dqb.dqb_bsoftlimit :
+-               dquot->dq_dqb.dqb_bhardlimit) >> sb->s_blocksize_bits;
++      limit = 0;
++      if (dquot->dq_dqb.dqb_bsoftlimit &&
++          (!limit || dquot->dq_dqb.dqb_bsoftlimit < limit))
++              limit = dquot->dq_dqb.dqb_bsoftlimit;
++      if (dquot->dq_dqb.dqb_bhardlimit &&
++          (!limit || dquot->dq_dqb.dqb_bhardlimit < limit))
++              limit = dquot->dq_dqb.dqb_bhardlimit;
++      limit >>= sb->s_blocksize_bits;
++
+       if (limit && buf->f_blocks > limit) {
+               curblock = (dquot->dq_dqb.dqb_curspace +
+                           dquot->dq_dqb.dqb_rsvspace) >> sb->s_blocksize_bits;
+@@ -5552,9 +5554,14 @@ static int ext4_statfs_project(struct super_block *sb,
+                        (buf->f_blocks - curblock) : 0;
+       }
+ 
+-      limit = dquot->dq_dqb.dqb_isoftlimit ?
+-              dquot->dq_dqb.dqb_isoftlimit :
+-              dquot->dq_dqb.dqb_ihardlimit;
++      limit = 0;
++      if (dquot->dq_dqb.dqb_isoftlimit &&
++          (!limit || dquot->dq_dqb.dqb_isoftlimit < limit))
++              limit = dquot->dq_dqb.dqb_isoftlimit;
++      if (dquot->dq_dqb.dqb_ihardlimit &&
++          (!limit || dquot->dq_dqb.dqb_ihardlimit < limit))
++              limit = dquot->dq_dqb.dqb_ihardlimit;
++
+       if (limit && buf->f_files > limit) {
+               buf->f_files = limit;
+               buf->f_ffree =
+diff --git a/fs/io-wq.c b/fs/io-wq.c
+index 5147d2213b01..0dc4bb6de656 100644
+--- a/fs/io-wq.c
++++ b/fs/io-wq.c
+@@ -16,6 +16,7 @@
+ #include <linux/slab.h>
+ #include <linux/kthread.h>
+ #include <linux/rculist_nulls.h>
++#include <linux/fs_struct.h>
+ 
+ #include "io-wq.h"
+ 
+@@ -58,6 +59,7 @@ struct io_worker {
+       struct mm_struct *mm;
+       const struct cred *creds;
+       struct files_struct *restore_files;
++      struct fs_struct *restore_fs;
+ };
+ 
+ #if BITS_PER_LONG == 64
+@@ -150,6 +152,9 @@ static bool __io_worker_unuse(struct io_wqe *wqe, struct io_worker *worker)
+               task_unlock(current);
+       }
+ 
++      if (current->fs != worker->restore_fs)
++              current->fs = worker->restore_fs;
++
+       /*
+        * If we have an active mm, we need to drop the wq lock before unusing
+        * it. If we do, return true and let the caller retry the idle loop.
+@@ -310,6 +315,7 @@ static void io_worker_start(struct io_wqe *wqe, struct io_worker *worker)
+ 
+       worker->flags |= (IO_WORKER_F_UP | IO_WORKER_F_RUNNING);
+       worker->restore_files = current->files;
++      worker->restore_fs = current->fs;
+       io_wqe_inc_running(wqe, worker);
+ }
+ 
+@@ -456,6 +462,8 @@ next:
+               }
+               if (!worker->creds)
+                       worker->creds = override_creds(wq->creds);
++              if (work->fs && current->fs != work->fs)
++                      current->fs = work->fs;
+               if (test_bit(IO_WQ_BIT_CANCEL, &wq->state))
+                       work->flags |= IO_WQ_WORK_CANCEL;
+               if (worker->mm)
+diff --git a/fs/io-wq.h b/fs/io-wq.h
+index 3f5e356de980..bbab98d1d328 100644
+--- a/fs/io-wq.h
++++ b/fs/io-wq.h
+@@ -72,6 +72,7 @@ struct io_wq_work {
+       };
+       void (*func)(struct io_wq_work **);
+       struct files_struct *files;
++      struct fs_struct *fs;
+       unsigned flags;
+ };
+ 
+@@ -79,8 +80,9 @@ struct io_wq_work {
+       do {                                            \
+               (work)->list.next = NULL;               \
+               (work)->func = _func;                   \
+-              (work)->flags = 0;                      \
+               (work)->files = NULL;                   \
++              (work)->fs = NULL;                      \
++              (work)->flags = 0;                      \
+       } while (0)                                     \
+ 
+ typedef void (get_work_fn)(struct io_wq_work *);
+diff --git a/fs/io_uring.c b/fs/io_uring.c
+index f470fb21467e..6ae692b02980 100644
+--- a/fs/io_uring.c
++++ b/fs/io_uring.c
+@@ -1786,17 +1786,6 @@ static int io_alloc_async_ctx(struct io_kiocb *req)
+       return req->io == NULL;
+ }
+ 
+-static void io_rw_async(struct io_wq_work **workptr)
+-{
+-      struct io_kiocb *req = container_of(*workptr, struct io_kiocb, work);
+-      struct iovec *iov = NULL;
+-
+-      if (req->io->rw.iov != req->io->rw.fast_iov)
+-              iov = req->io->rw.iov;
+-      io_wq_submit_work(workptr);
+-      kfree(iov);
+-}
+-
+ static int io_setup_async_rw(struct io_kiocb *req, ssize_t io_size,
+                            struct iovec *iovec, struct iovec *fast_iov,
+                            struct iov_iter *iter)
+@@ -1810,7 +1799,6 @@ static int io_setup_async_rw(struct io_kiocb *req, ssize_t io_size,
+ 
+               io_req_map_rw(req, io_size, iovec, fast_iov, iter);
+       }
+-      req->work.func = io_rw_async;
+       return 0;
+ }
+ 
+@@ -1897,8 +1885,7 @@ copy_iov:
+               }
+       }
+ out_free:
+-      if (!io_wq_current_is_worker())
+-              kfree(iovec);
++      kfree(iovec);
+       return ret;
+ }
+ 
+@@ -1991,6 +1978,12 @@ static int io_write(struct io_kiocb *req, struct io_kiocb **nxt,
+                       ret2 = call_write_iter(req->file, kiocb, &iter);
+               else
+                       ret2 = loop_rw_iter(WRITE, req->file, kiocb, &iter);
++              /*
++               * Raw bdev writes will -EOPNOTSUPP for IOCB_NOWAIT. Just
++               * retry them without IOCB_NOWAIT.
++               */
++              if (ret2 == -EOPNOTSUPP && (kiocb->ki_flags & IOCB_NOWAIT))
++                      ret2 = -EAGAIN;
+               if (!force_nonblock || ret2 != -EAGAIN) {
+                       kiocb_done(kiocb, ret2, nxt, req->in_async);
+               } else {
+@@ -2003,8 +1996,7 @@ copy_iov:
+               }
+       }
+ out_free:
+-      if (!io_wq_current_is_worker())
+-              kfree(iovec);
++      kfree(iovec);
+       return ret;
+ }
+ 
+@@ -2174,19 +2166,6 @@ static int io_sync_file_range(struct io_kiocb *req, struct io_kiocb **nxt,
+       return 0;
+ }
+ 
+-#if defined(CONFIG_NET)
+-static void io_sendrecv_async(struct io_wq_work **workptr)
+-{
+-      struct io_kiocb *req = container_of(*workptr, struct io_kiocb, work);
+-      struct iovec *iov = NULL;
+-
+-      if (req->io->rw.iov != req->io->rw.fast_iov)
+-              iov = req->io->msg.iov;
+-      io_wq_submit_work(workptr);
+-      kfree(iov);
+-}
+-#endif
+-
+ static int io_sendmsg_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
+ {
+ #if defined(CONFIG_NET)
+@@ -2254,17 +2233,19 @@ static int io_sendmsg(struct io_kiocb *req, struct io_kiocb **nxt,
+               if (force_nonblock && ret == -EAGAIN) {
+                       if (req->io)
+                               return -EAGAIN;
+-                      if (io_alloc_async_ctx(req))
++                      if (io_alloc_async_ctx(req)) {
++                              if (kmsg && kmsg->iov != kmsg->fast_iov)
++                                      kfree(kmsg->iov);
+                               return -ENOMEM;
++                      }
+                       memcpy(&req->io->msg, &io.msg, sizeof(io.msg));
+-                      req->work.func = io_sendrecv_async;
+                       return -EAGAIN;
+               }
+               if (ret == -ERESTARTSYS)
+                       ret = -EINTR;
+       }
+ 
+-      if (!io_wq_current_is_worker() && kmsg && kmsg->iov != kmsg->fast_iov)
++      if (kmsg && kmsg->iov != kmsg->fast_iov)
+               kfree(kmsg->iov);
+       io_cqring_add_event(req, ret);
+       if (ret < 0)
+@@ -2346,17 +2327,19 @@ static int io_recvmsg(struct io_kiocb *req, struct io_kiocb **nxt,
+               if (force_nonblock && ret == -EAGAIN) {
+                       if (req->io)
+                               return -EAGAIN;
+-                      if (io_alloc_async_ctx(req))
++                      if (io_alloc_async_ctx(req)) {
++                              if (kmsg && kmsg->iov != kmsg->fast_iov)
++                                      kfree(kmsg->iov);
+                               return -ENOMEM;
++                      }
+                       memcpy(&req->io->msg, &io.msg, sizeof(io.msg));
+-                      req->work.func = io_sendrecv_async;
+                       return -EAGAIN;
+               }
+               if (ret == -ERESTARTSYS)
+                       ret = -EINTR;
+       }
+ 
+-      if (!io_wq_current_is_worker() && kmsg && kmsg->iov != kmsg->fast_iov)
++      if (kmsg && kmsg->iov != kmsg->fast_iov)
+               kfree(kmsg->iov);
+       io_cqring_add_event(req, ret);
+       if (ret < 0)
+diff --git a/fs/jbd2/commit.c b/fs/jbd2/commit.c
+index 7f0b362b3842..3845750f70ec 100644
+--- a/fs/jbd2/commit.c
++++ b/fs/jbd2/commit.c
+@@ -976,29 +976,33 @@ restart_loop:
+                * it. */
+ 
+               /*
+-              * A buffer which has been freed while still being journaled by
+-              * a previous transaction.
+-              */
+-              if (buffer_freed(bh)) {
++               * A buffer which has been freed while still being journaled
++               * by a previous transaction, refile the buffer to BJ_Forget of
++               * the running transaction. If the just committed transaction
++               * contains "add to orphan" operation, we can completely
++               * invalidate the buffer now. We are rather thorough in that
++               * since the buffer may be still accessible when blocksize <
++               * pagesize and it is attached to the last partial page.
++               */
++              if (buffer_freed(bh) && !jh->b_next_transaction) {
++                      struct address_space *mapping;
++
++                      clear_buffer_freed(bh);
++                      clear_buffer_jbddirty(bh);
++
+                       /*
+-                       * If the running transaction is the one containing
+-                       * "add to orphan" operation (b_next_transaction !=
+-                       * NULL), we have to wait for that transaction to
+-                       * commit before we can really get rid of the buffer.
+-                       * So just clear b_modified to not confuse transaction
+-                       * credit accounting and refile the buffer to
+-                       * BJ_Forget of the running transaction. If the just
+-                       * committed transaction contains "add to orphan"
+-                       * operation, we can completely invalidate the buffer
+-                       * now. We are rather through in that since the
+-                       * buffer may be still accessible when blocksize <
+-                       * pagesize and it is attached to the last partial
+-                       * page.
++                       * Block device buffers need to stay mapped all the
++                       * time, so it is enough to clear buffer_jbddirty and
++                       * buffer_freed bits. For the file mapping buffers (i.e.
++                       * journalled data) we need to unmap buffer and clear
++                       * more bits. We also need to be careful about the check
++                       * because the data page mapping can get cleared under
++                       * our hands, in which case we need not clear more bits
++                       * because the page and buffers will be freed and can
++                       * never be reused once we are done with them.
+                        */
+-                      jh->b_modified = 0;
+-                      if (!jh->b_next_transaction) {
+-                              clear_buffer_freed(bh);
+-                              clear_buffer_jbddirty(bh);
++                      mapping = READ_ONCE(bh->b_page->mapping);
++                      if (mapping && !sb_is_blkdev_sb(mapping->host->i_sb)) {
+                               clear_buffer_mapped(bh);
+                               clear_buffer_new(bh);
+                               clear_buffer_req(bh);
+diff --git a/fs/jbd2/transaction.c b/fs/jbd2/transaction.c
+index 27b9f9dee434..0603dfa9ad90 100644
+--- a/fs/jbd2/transaction.c
++++ b/fs/jbd2/transaction.c
+@@ -2329,14 +2329,16 @@ static int journal_unmap_buffer(journal_t *journal, struct buffer_head *bh,
+                       return -EBUSY;
+               }
+               /*
+-               * OK, buffer won't be reachable after truncate. We just set
+-               * j_next_transaction to the running transaction (if there is
+-               * one) and mark buffer as freed so that commit code knows it
+-               * should clear dirty bits when it is done with the buffer.
++               * OK, buffer won't be reachable after truncate. We just clear
++               * b_modified to not confuse transaction credit accounting, and
++               * set j_next_transaction to the running transaction (if there
++               * is one) and mark buffer as freed so that commit code knows
++               * it should clear dirty bits when it is done with the buffer.
+                */
+               set_buffer_freed(bh);
+               if (journal->j_running_transaction && buffer_jbddirty(bh))
+                       jh->b_next_transaction = journal->j_running_transaction;
++              jh->b_modified = 0;
+               spin_unlock(&journal->j_list_lock);
+               spin_unlock(&jh->b_state_lock);
+               write_unlock(&journal->j_state_lock);
+diff --git a/fs/nfs/delegation.c b/fs/nfs/delegation.c
+index fe57b2b5314a..8e322bacde69 100644
+--- a/fs/nfs/delegation.c
++++ b/fs/nfs/delegation.c
+@@ -25,13 +25,29 @@
+ #include "internal.h"
+ #include "nfs4trace.h"
+ 
+-static void nfs_free_delegation(struct nfs_delegation *delegation)
++static atomic_long_t nfs_active_delegations;
++
++static void __nfs_free_delegation(struct nfs_delegation *delegation)
+ {
+       put_cred(delegation->cred);
+       delegation->cred = NULL;
+       kfree_rcu(delegation, rcu);
+ }
+ 
++static void nfs_mark_delegation_revoked(struct nfs_delegation *delegation)
++{
++      if (!test_and_set_bit(NFS_DELEGATION_REVOKED, &delegation->flags)) {
++              delegation->stateid.type = NFS4_INVALID_STATEID_TYPE;
++              atomic_long_dec(&nfs_active_delegations);
++      }
++}
++
++static void nfs_free_delegation(struct nfs_delegation *delegation)
++{
++      nfs_mark_delegation_revoked(delegation);
++      __nfs_free_delegation(delegation);
++}
++
+ /**
+  * nfs_mark_delegation_referenced - set delegation's REFERENCED flag
+  * @delegation: delegation to process
+@@ -222,13 +238,18 @@ void nfs_inode_reclaim_delegation(struct inode *inode, const struct cred *cred,
+ 
+ static int nfs_do_return_delegation(struct inode *inode, struct nfs_delegation *delegation, int issync)
+ {
++      const struct cred *cred;
+       int res = 0;
+ 
+-      if (!test_bit(NFS_DELEGATION_REVOKED, &delegation->flags))
+-              res = nfs4_proc_delegreturn(inode,
+-                              delegation->cred,
++      if (!test_bit(NFS_DELEGATION_REVOKED, &delegation->flags)) {
++              spin_lock(&delegation->lock);
++              cred = get_cred(delegation->cred);
++              spin_unlock(&delegation->lock);
++              res = nfs4_proc_delegreturn(inode, cred,
+                               &delegation->stateid,
+                               issync);
++              put_cred(cred);
++      }
+       return res;
+ }
+ 
+@@ -343,7 +364,8 @@ nfs_update_inplace_delegation(struct nfs_delegation *delegation,
+               delegation->stateid.seqid = update->stateid.seqid;
+               smp_wmb();
+               delegation->type = update->type;
+-              clear_bit(NFS_DELEGATION_REVOKED, &delegation->flags);
++              if (test_and_clear_bit(NFS_DELEGATION_REVOKED, &delegation->flags))
++                      atomic_long_inc(&nfs_active_delegations);
+       }
+ }
+ 
+@@ -423,6 +445,8 @@ add_new:
+       rcu_assign_pointer(nfsi->delegation, delegation);
+       delegation = NULL;
+ 
++      atomic_long_inc(&nfs_active_delegations);
++
+       trace_nfs4_set_delegation(inode, type);
+ 
+       spin_lock(&inode->i_lock);
+@@ -432,7 +456,7 @@ add_new:
+ out:
+       spin_unlock(&clp->cl_lock);
+       if (delegation != NULL)
+-              nfs_free_delegation(delegation);
++              __nfs_free_delegation(delegation);
+       if (freeme != NULL) {
+               nfs_do_return_delegation(inode, freeme, 0);
+               nfs_free_delegation(freeme);
+@@ -760,13 +784,6 @@ static void nfs_client_mark_return_unused_delegation_types(struct nfs_client *cl
+       rcu_read_unlock();
+ }
+ 
+-static void nfs_mark_delegation_revoked(struct nfs_server *server,
+-              struct nfs_delegation *delegation)
+-{
+-      set_bit(NFS_DELEGATION_REVOKED, &delegation->flags);
+-      delegation->stateid.type = NFS4_INVALID_STATEID_TYPE;
+-}
+-
+ static void nfs_revoke_delegation(struct inode *inode,
+               const nfs4_stateid *stateid)
+ {
+@@ -794,7 +811,7 @@ static void nfs_revoke_delegation(struct inode *inode,
+               }
+               spin_unlock(&delegation->lock);
+       }
+-      nfs_mark_delegation_revoked(NFS_SERVER(inode), delegation);
++      nfs_mark_delegation_revoked(delegation);
+       ret = true;
+ out:
+       rcu_read_unlock();
+@@ -833,7 +850,7 @@ void nfs_delegation_mark_returned(struct inode *inode,
+                       delegation->stateid.seqid = stateid->seqid;
+       }
+ 
+-      nfs_mark_delegation_revoked(NFS_SERVER(inode), delegation);
++      nfs_mark_delegation_revoked(delegation);
+ 
+ out_clear_returning:
+       clear_bit(NFS_DELEGATION_RETURNING, &delegation->flags);
+diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
+index a2759b4062ae..6ddb4f517d37 100644
+--- a/fs/nfs/nfs4proc.c
++++ b/fs/nfs/nfs4proc.c
+@@ -5295,7 +5295,7 @@ static void nfs4_proc_write_setup(struct nfs_pgio_header *hdr,
+       hdr->timestamp   = jiffies;
+ 
+       msg->rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_WRITE];
+-      nfs4_init_sequence(&hdr->args.seq_args, &hdr->res.seq_res, 1, 0);
++      nfs4_init_sequence(&hdr->args.seq_args, &hdr->res.seq_res, 0, 0);
+       nfs4_state_protect_write(server->nfs_client, clnt, msg, hdr);
+ }
+ 
+diff --git a/include/acpi/acpixf.h b/include/acpi/acpixf.h
+index 18790b9e16b5..11fdb0cc9a83 100644
+--- a/include/acpi/acpixf.h
++++ b/include/acpi/acpixf.h
+@@ -752,6 +752,7 @@ ACPI_HW_DEPENDENT_RETURN_UINT32(u32 acpi_dispatch_gpe(acpi_handle gpe_device, u3
+ ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status acpi_disable_all_gpes(void))
+ ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status acpi_enable_all_runtime_gpes(void))
+ ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status acpi_enable_all_wakeup_gpes(void))
++ACPI_HW_DEPENDENT_RETURN_UINT32(u32 acpi_any_gpe_status_set(void))
+ 
+ ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status
+                               acpi_get_gpe_device(u32 gpe_index,
+diff --git a/include/linux/gpio/consumer.h b/include/linux/gpio/consumer.h
+index 5215fdba6b9a..bf2d017dd7b7 100644
+--- a/include/linux/gpio/consumer.h
++++ b/include/linux/gpio/consumer.h
+@@ -158,6 +158,7 @@ int gpiod_set_raw_array_value_cansleep(unsigned int array_size,
+ 
+ int gpiod_set_debounce(struct gpio_desc *desc, unsigned debounce);
+ int gpiod_set_transitory(struct gpio_desc *desc, bool transitory);
++void gpiod_toggle_active_low(struct gpio_desc *desc);
+ 
+ int gpiod_is_active_low(const struct gpio_desc *desc);
+ int gpiod_cansleep(const struct gpio_desc *desc);
+@@ -483,6 +484,12 @@ static inline int gpiod_set_transitory(struct gpio_desc *desc, bool transitory)
+       return -ENOSYS;
+ }
+ 
++static inline void gpiod_toggle_active_low(struct gpio_desc *desc)
++{
++      /* GPIO can never have been requested */
++      WARN_ON(desc);
++}
++
+ static inline int gpiod_is_active_low(const struct gpio_desc *desc)
+ {
+       /* GPIO can never have been requested */
+diff --git a/include/linux/suspend.h b/include/linux/suspend.h
+index 6fc8843f1c9e..cd97d2c8840c 100644
+--- a/include/linux/suspend.h
++++ b/include/linux/suspend.h
+@@ -191,7 +191,7 @@ struct platform_s2idle_ops {
+       int (*begin)(void);
+       int (*prepare)(void);
+       int (*prepare_late)(void);
+-      void (*wake)(void);
++      bool (*wake)(void);
+       void (*restore_early)(void);
+       void (*restore)(void);
+       void (*end)(void);
+diff --git a/include/net/mac80211.h b/include/net/mac80211.h
+index aa145808e57a..77e6b5a83b06 100644
+--- a/include/net/mac80211.h
++++ b/include/net/mac80211.h
+@@ -1004,12 +1004,11 @@ ieee80211_rate_get_vht_nss(const struct ieee80211_tx_rate *rate)
+ struct ieee80211_tx_info {
+       /* common information */
+       u32 flags;
+-      u8 band;
+-
+-      u8 hw_queue;
+-
+-      u16 ack_frame_id:6;
+-      u16 tx_time_est:10;
++      u32 band:3,
++          ack_frame_id:13,
++          hw_queue:4,
++          tx_time_est:10;
++      /* 2 free bits */
+ 
+       union {
+               struct {
+diff --git a/kernel/cgroup/cgroup.c b/kernel/cgroup/cgroup.c
+index 1e12e6928bca..30892c4759b4 100644
+--- a/kernel/cgroup/cgroup.c
++++ b/kernel/cgroup/cgroup.c
+@@ -5932,11 +5932,14 @@ void cgroup_post_fork(struct task_struct *child)
+ 
+       spin_lock_irq(&css_set_lock);
+ 
+-      WARN_ON_ONCE(!list_empty(&child->cg_list));
+-      cset = task_css_set(current); /* current is @child's parent */
+-      get_css_set(cset);
+-      cset->nr_tasks++;
+-      css_set_move_task(child, NULL, cset, false);
++      /* init tasks are special, only link regular threads */
++      if (likely(child->pid)) {
++              WARN_ON_ONCE(!list_empty(&child->cg_list));
++              cset = task_css_set(current); /* current is @child's parent */
++              get_css_set(cset);
++              cset->nr_tasks++;
++              css_set_move_task(child, NULL, cset, false);
++      }
+ 
+       /*
+        * If the cgroup has to be frozen, the new task has too.  Let's set
+diff --git a/kernel/power/suspend.c b/kernel/power/suspend.c
+index f3b7239f1892..27f149f5d4a9 100644
+--- a/kernel/power/suspend.c
++++ b/kernel/power/suspend.c
+@@ -131,11 +131,12 @@ static void s2idle_loop(void)
+        * to avoid them upfront.
+        */
+       for (;;) {
+-              if (s2idle_ops && s2idle_ops->wake)
+-                      s2idle_ops->wake();
+-
+-              if (pm_wakeup_pending())
++              if (s2idle_ops && s2idle_ops->wake) {
++                      if (s2idle_ops->wake())
++                              break;
++              } else if (pm_wakeup_pending()) {
+                       break;
++              }
+ 
+               pm_wakeup_clear(false);
+ 
+diff --git a/kernel/sched/core.c b/kernel/sched/core.c
+index bfe756dee129..894fb81313fd 100644
+--- a/kernel/sched/core.c
++++ b/kernel/sched/core.c
+@@ -7260,7 +7260,7 @@ capacity_from_percent(char *buf)
+                                            &req.percent);
+               if (req.ret)
+                       return req;
+-              if (req.percent > UCLAMP_PERCENT_SCALE) {
++              if ((u64)req.percent > UCLAMP_PERCENT_SCALE) {
+                       req.ret = -ERANGE;
+                       return req;
+               }
+diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c
+index 000c742d0527..6aee699deb28 100644
+--- a/net/mac80211/cfg.c
++++ b/net/mac80211/cfg.c
+@@ -3450,7 +3450,7 @@ int ieee80211_attach_ack_skb(struct ieee80211_local *local, struct sk_buff *skb,
+ 
+       spin_lock_irqsave(&local->ack_status_lock, spin_flags);
+       id = idr_alloc(&local->ack_status_frames, ack_skb,
+-                     1, 0x40, GFP_ATOMIC);
++                     1, 0x2000, GFP_ATOMIC);
+       spin_unlock_irqrestore(&local->ack_status_lock, spin_flags);
+ 
+       if (id < 0) {
+diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
+index 5fa13176036f..e041af2f021a 100644
+--- a/net/mac80211/mlme.c
++++ b/net/mac80211/mlme.c
+@@ -8,7 +8,7 @@
+  * Copyright 2007, Michael Wu <flaming...@sourmilk.net>
+  * Copyright 2013-2014  Intel Mobile Communications GmbH
+  * Copyright (C) 2015 - 2017 Intel Deutschland GmbH
+- * Copyright (C) 2018 - 2019 Intel Corporation
++ * Copyright (C) 2018 - 2020 Intel Corporation
+  */
+ 
+ #include <linux/delay.h>
+@@ -1311,7 +1311,7 @@ ieee80211_sta_process_chanswitch(struct ieee80211_sub_if_data *sdata,
+       if (!res) {
+               ch_switch.timestamp = timestamp;
+               ch_switch.device_timestamp = device_timestamp;
+-              ch_switch.block_tx =  beacon ? csa_ie.mode : 0;
++              ch_switch.block_tx = csa_ie.mode;
+               ch_switch.chandef = csa_ie.chandef;
+               ch_switch.count = csa_ie.count;
+               ch_switch.delay = csa_ie.max_switch_time;
+@@ -1404,7 +1404,7 @@ ieee80211_sta_process_chanswitch(struct ieee80211_sub_if_data *sdata,
+ 
+       sdata->vif.csa_active = true;
+       sdata->csa_chandef = csa_ie.chandef;
+-      sdata->csa_block_tx = ch_switch.block_tx;
++      sdata->csa_block_tx = csa_ie.mode;
+       ifmgd->csa_ignored_same_chan = false;
+ 
+       if (sdata->csa_block_tx)
+@@ -1438,7 +1438,7 @@ ieee80211_sta_process_chanswitch(struct ieee80211_sub_if_data *sdata,
+        * reset when the disconnection worker runs.
+        */
+       sdata->vif.csa_active = true;
+-      sdata->csa_block_tx = ch_switch.block_tx;
++      sdata->csa_block_tx = csa_ie.mode;
+ 
+       ieee80211_queue_work(&local->hw, &ifmgd->csa_connection_drop_work);
+       mutex_unlock(&local->chanctx_mtx);
+diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c
+index a8a7306a1f56..b0444e4aba2a 100644
+--- a/net/mac80211/tx.c
++++ b/net/mac80211/tx.c
+@@ -2442,7 +2442,7 @@ static int ieee80211_store_ack_skb(struct ieee80211_local *local,
+ 
+               spin_lock_irqsave(&local->ack_status_lock, flags);
+               id = idr_alloc(&local->ack_status_frames, ack_skb,
+-                             1, 0x40, GFP_ATOMIC);
++                             1, 0x2000, GFP_ATOMIC);
+               spin_unlock_irqrestore(&local->ack_status_lock, flags);
+ 
+               if (id >= 0) {
+diff --git a/net/sunrpc/xprtrdma/frwr_ops.c b/net/sunrpc/xprtrdma/frwr_ops.c
+index 523722be6a16..45366570ea65 100644
+--- a/net/sunrpc/xprtrdma/frwr_ops.c
++++ b/net/sunrpc/xprtrdma/frwr_ops.c
+@@ -298,8 +298,8 @@ struct rpcrdma_mr_seg *frwr_map(struct rpcrdma_xprt *r_xprt,
+ {
+       struct rpcrdma_ia *ia = &r_xprt->rx_ia;
+       struct ib_reg_wr *reg_wr;
++      int i, n, dma_nents;
+       struct ib_mr *ibmr;
+-      int i, n;
+       u8 key;
+ 
+       if (nsegs > ia->ri_max_frwr_depth)
+@@ -323,15 +323,16 @@ struct rpcrdma_mr_seg *frwr_map(struct rpcrdma_xprt *r_xprt,
+                       break;
+       }
+       mr->mr_dir = rpcrdma_data_dir(writing);
++      mr->mr_nents = i;
+ 
+-      mr->mr_nents =
+-              ib_dma_map_sg(ia->ri_id->device, mr->mr_sg, i, mr->mr_dir);
+-      if (!mr->mr_nents)
++      dma_nents = ib_dma_map_sg(ia->ri_id->device, mr->mr_sg, mr->mr_nents,
++                                mr->mr_dir);
++      if (!dma_nents)
+               goto out_dmamap_err;
+ 
+       ibmr = mr->frwr.fr_mr;
+-      n = ib_map_mr_sg(ibmr, mr->mr_sg, mr->mr_nents, NULL, PAGE_SIZE);
+-      if (unlikely(n != mr->mr_nents))
++      n = ib_map_mr_sg(ibmr, mr->mr_sg, dma_nents, NULL, PAGE_SIZE);
++      if (n != dma_nents)
+               goto out_mapmr_err;
+ 
+       ibmr->iova &= 0x00000000ffffffff;
+diff --git a/sound/core/pcm_native.c b/sound/core/pcm_native.c
+index c9b7e1a073ee..df40d38f6e29 100644
+--- a/sound/core/pcm_native.c
++++ b/sound/core/pcm_native.c
+@@ -2474,7 +2474,8 @@ void snd_pcm_release_substream(struct snd_pcm_substream *substream)
+ 
+       snd_pcm_drop(substream);
+       if (substream->hw_opened) {
+-              do_hw_free(substream);
++              if (substream->runtime->status->state != SNDRV_PCM_STATE_OPEN)
++                      do_hw_free(substream);
+               substream->ops->close(substream);
+               substream->hw_opened = 0;
+       }
+diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
+index f2ea3528bfb1..128d81b4140b 100644
+--- a/sound/pci/hda/patch_realtek.c
++++ b/sound/pci/hda/patch_realtek.c
+@@ -2447,6 +2447,7 @@ static const struct snd_pci_quirk alc882_fixup_tbl[] = {
+       SND_PCI_QUIRK(0x1071, 0x8258, "Evesham Voyaeger", ALC882_FIXUP_EAPD),
+       SND_PCI_QUIRK(0x1458, 0xa002, "Gigabyte EP45-DS3/Z87X-UD3H", ALC889_FIXUP_FRONT_HP_NO_PRESENCE),
+       SND_PCI_QUIRK(0x1458, 0xa0b8, "Gigabyte AZ370-Gaming", ALC1220_FIXUP_GB_DUAL_CODECS),
++      SND_PCI_QUIRK(0x1462, 0x1276, "MSI-GL73", ALC1220_FIXUP_CLEVO_P950),
+       SND_PCI_QUIRK(0x1462, 0x7350, "MSI-7350", ALC889_FIXUP_CD),
+       SND_PCI_QUIRK(0x1462, 0xda57, "MSI Z270-Gaming", ALC1220_FIXUP_GB_DUAL_CODECS),
+       SND_PCI_QUIRK_VENDOR(0x1462, "MSI", ALC882_FIXUP_GPIO3),
+@@ -5701,8 +5702,11 @@ static void alc_fixup_headset_jack(struct hda_codec *codec,
+               break;
+       case HDA_FIXUP_ACT_INIT:
+               switch (codec->core.vendor_id) {
++              case 0x10ec0215:
+               case 0x10ec0225:
++              case 0x10ec0285:
+               case 0x10ec0295:
++              case 0x10ec0289:
+               case 0x10ec0299:
+                       alc_write_coef_idx(codec, 0x48, 0xd011);
+                       alc_update_coef_idx(codec, 0x49, 0x007f, 0x0045);
+diff --git a/sound/usb/clock.c b/sound/usb/clock.c
+index 018b1ecb5404..a48313dfa967 100644
+--- a/sound/usb/clock.c
++++ b/sound/usb/clock.c
+@@ -151,8 +151,34 @@ static int uac_clock_selector_set_val(struct snd_usb_audio *chip, int selector_i
+       return ret;
+ }
+ 
++/*
++ * Assume the clock is valid if clock source supports only one single sample
++ * rate, the terminal is connected directly to it (there is no clock selector)
++ * and clock type is internal. This is to deal with some Denon DJ controllers
++ * that always reports that clock is invalid.
++ */
++static bool uac_clock_source_is_valid_quirk(struct snd_usb_audio *chip,
++                                          struct audioformat *fmt,
++                                          int source_id)
++{
++      if (fmt->protocol == UAC_VERSION_2) {
++              struct uac_clock_source_descriptor *cs_desc =
++                      snd_usb_find_clock_source(chip->ctrl_intf, source_id);
++
++              if (!cs_desc)
++                      return false;
++
++              return (fmt->nr_rates == 1 &&
++                      (fmt->clock & 0xff) == cs_desc->bClockID &&
++                      (cs_desc->bmAttributes & 0x3) !=
++                              UAC_CLOCK_SOURCE_TYPE_EXT);
++      }
++
++      return false;
++}
++
+ static bool uac_clock_source_is_valid(struct snd_usb_audio *chip,
+-                                    int protocol,
++                                    struct audioformat *fmt,
+                                     int source_id)
+ {
+       int err;
+@@ -160,7 +186,7 @@ static bool uac_clock_source_is_valid(struct snd_usb_audio *chip,
+       struct usb_device *dev = chip->dev;
+       u32 bmControls;
+ 
+-      if (protocol == UAC_VERSION_3) {
++      if (fmt->protocol == UAC_VERSION_3) {
+               struct uac3_clock_source_descriptor *cs_desc =
+                      snd_usb_find_clock_source_v3(chip->ctrl_intf, source_id);
+ 
+@@ -194,10 +220,14 @@ static bool uac_clock_source_is_valid(struct snd_usb_audio *chip,
+               return false;
+       }
+ 
+-      return data ? true :  false;
++      if (data)
++              return true;
++      else
++              return uac_clock_source_is_valid_quirk(chip, fmt, source_id);
+ }
+ 
+-static int __uac_clock_find_source(struct snd_usb_audio *chip, int entity_id,
++static int __uac_clock_find_source(struct snd_usb_audio *chip,
++                                 struct audioformat *fmt, int entity_id,
+                                  unsigned long *visited, bool validate)
+ {
+       struct uac_clock_source_descriptor *source;
+@@ -217,7 +247,7 @@ static int __uac_clock_find_source(struct snd_usb_audio *chip, int entity_id,
+       source = snd_usb_find_clock_source(chip->ctrl_intf, entity_id);
+       if (source) {
+               entity_id = source->bClockID;
+-              if (validate && !uac_clock_source_is_valid(chip, UAC_VERSION_2,
++              if (validate && !uac_clock_source_is_valid(chip, fmt,
+                                                               entity_id)) {
+                       usb_audio_err(chip,
+                               "clock source %d is not valid, cannot use\n",
+@@ -248,8 +278,9 @@ static int __uac_clock_find_source(struct snd_usb_audio *chip, int entity_id,
+               }
+ 
+               cur = ret;
+-              ret = __uac_clock_find_source(chip, selector->baCSourceID[ret - 1],
+-                                             visited, validate);
++              ret = __uac_clock_find_source(chip, fmt,
++                                            selector->baCSourceID[ret - 1],
++                                            visited, validate);
+               if (!validate || ret > 0 || !chip->autoclock)
+                       return ret;
+ 
+@@ -260,8 +291,9 @@ static int __uac_clock_find_source(struct snd_usb_audio *chip, int entity_id,
+                       if (i == cur)
+                               continue;
+ 
+-                      ret = __uac_clock_find_source(chip, selector->baCSourceID[i - 1],
+-                              visited, true);
++                      ret = __uac_clock_find_source(chip, fmt,
++                                                    selector->baCSourceID[i - 1],
++                                                    visited, true);
+                       if (ret < 0)
+                               continue;
+ 
+@@ -281,14 +313,16 @@ static int __uac_clock_find_source(struct snd_usb_audio *chip, int entity_id,
+       /* FIXME: multipliers only act as pass-thru element for now */
+       multiplier = snd_usb_find_clock_multiplier(chip->ctrl_intf, entity_id);
+       if (multiplier)
+-              return __uac_clock_find_source(chip, multiplier->bCSourceID,
+-                                              visited, validate);
++              return __uac_clock_find_source(chip, fmt,
++                                             multiplier->bCSourceID,
++                                             visited, validate);
+ 
+       return -EINVAL;
+ }
+ 
+-static int __uac3_clock_find_source(struct snd_usb_audio *chip, int entity_id,
+-                                 unsigned long *visited, bool validate)
++static int __uac3_clock_find_source(struct snd_usb_audio *chip,
++                                  struct audioformat *fmt, int entity_id,
++                                  unsigned long *visited, bool validate)
+ {
+       struct uac3_clock_source_descriptor *source;
+       struct uac3_clock_selector_descriptor *selector;
+@@ -307,7 +341,7 @@ static int __uac3_clock_find_source(struct snd_usb_audio *chip, int entity_id,
+       source = snd_usb_find_clock_source_v3(chip->ctrl_intf, entity_id);
+       if (source) {
+               entity_id = source->bClockID;
+-              if (validate && !uac_clock_source_is_valid(chip, UAC_VERSION_3,
++              if (validate && !uac_clock_source_is_valid(chip, fmt,
+                                                               entity_id)) {
+                       usb_audio_err(chip,
+                               "clock source %d is not valid, cannot use\n",
+@@ -338,7 +372,8 @@ static int __uac3_clock_find_source(struct snd_usb_audio *chip, int entity_id,
+               }
+ 
+               cur = ret;
+-              ret = __uac3_clock_find_source(chip, selector->baCSourceID[ret - 1],
++              ret = __uac3_clock_find_source(chip, fmt,
++                                             selector->baCSourceID[ret - 1],
+                                              visited, validate);
+               if (!validate || ret > 0 || !chip->autoclock)
+                       return ret;
+@@ -350,8 +385,9 @@ static int __uac3_clock_find_source(struct snd_usb_audio *chip, int entity_id,
+                       if (i == cur)
+                               continue;
+ 
+-                      ret = __uac3_clock_find_source(chip, selector->baCSourceID[i - 1],
+-                              visited, true);
++                      ret = __uac3_clock_find_source(chip, fmt,
++                                                     selector->baCSourceID[i - 1],
++                                                     visited, true);
+                       if (ret < 0)
+                               continue;
+ 
+@@ -372,7 +408,8 @@ static int __uac3_clock_find_source(struct snd_usb_audio *chip, int entity_id,
+       multiplier = snd_usb_find_clock_multiplier_v3(chip->ctrl_intf,
+                                                     entity_id);
+       if (multiplier)
+-              return __uac3_clock_find_source(chip, multiplier->bCSourceID,
++              return __uac3_clock_find_source(chip, fmt,
++                                              multiplier->bCSourceID,
+                                               visited, validate);
+ 
+       return -EINVAL;
+@@ -389,18 +426,18 @@ static int __uac3_clock_find_source(struct snd_usb_audio *chip, int entity_id,
+  *
+  * Returns the clock source UnitID (>=0) on success, or an error.
+  */
+-int snd_usb_clock_find_source(struct snd_usb_audio *chip, int protocol,
+-                            int entity_id, bool validate)
++int snd_usb_clock_find_source(struct snd_usb_audio *chip,
++                            struct audioformat *fmt, bool validate)
+ {
+       DECLARE_BITMAP(visited, 256);
+       memset(visited, 0, sizeof(visited));
+ 
+-      switch (protocol) {
++      switch (fmt->protocol) {
+       case UAC_VERSION_2:
+-              return __uac_clock_find_source(chip, entity_id, visited,
++              return __uac_clock_find_source(chip, fmt, fmt->clock, visited,
+                                              validate);
+       case UAC_VERSION_3:
+-              return __uac3_clock_find_source(chip, entity_id, visited,
++              return __uac3_clock_find_source(chip, fmt, fmt->clock, visited,
+                                              validate);
+       default:
+               return -EINVAL;
+@@ -501,8 +538,7 @@ static int set_sample_rate_v2v3(struct snd_usb_audio *chip, int iface,
+        * automatic clock selection if the current clock is not
+        * valid.
+        */
+-      clock = snd_usb_clock_find_source(chip, fmt->protocol,
+-                                        fmt->clock, true);
++      clock = snd_usb_clock_find_source(chip, fmt, true);
+       if (clock < 0) {
+               /* We did not find a valid clock, but that might be
+                * because the current sample rate does not match an
+@@ -510,8 +546,7 @@ static int set_sample_rate_v2v3(struct snd_usb_audio *chip, int iface,
+                * and we will do another validation after setting the
+                * rate.
+                */
+-              clock = snd_usb_clock_find_source(chip, fmt->protocol,
+-                                                fmt->clock, false);
++              clock = snd_usb_clock_find_source(chip, fmt, false);
+               if (clock < 0)
+                       return clock;
+       }
+@@ -577,7 +612,7 @@ static int set_sample_rate_v2v3(struct snd_usb_audio *chip, int iface,
+ 
+ validation:
+       /* validate clock after rate change */
+-      if (!uac_clock_source_is_valid(chip, fmt->protocol, clock))
++      if (!uac_clock_source_is_valid(chip, fmt, clock))
+               return -ENXIO;
+       return 0;
+ }
+diff --git a/sound/usb/clock.h b/sound/usb/clock.h
+index 076e31b79ee0..68df0fbe09d0 100644
+--- a/sound/usb/clock.h
++++ b/sound/usb/clock.h
+@@ -6,7 +6,7 @@ int snd_usb_init_sample_rate(struct snd_usb_audio *chip, int iface,
+                            struct usb_host_interface *alts,
+                            struct audioformat *fmt, int rate);
+ 
+-int snd_usb_clock_find_source(struct snd_usb_audio *chip, int protocol,
+-                           int entity_id, bool validate);
++int snd_usb_clock_find_source(struct snd_usb_audio *chip,
++                            struct audioformat *fmt, bool validate);
+ 
+ #endif /* __USBAUDIO_CLOCK_H */
+diff --git a/sound/usb/format.c b/sound/usb/format.c
+index d79db71305f6..25668ba5e68e 100644
+--- a/sound/usb/format.c
++++ b/sound/usb/format.c
+@@ -322,8 +322,7 @@ static int parse_audio_format_rates_v2v3(struct snd_usb_audio *chip,
+       struct usb_device *dev = chip->dev;
+       unsigned char tmp[2], *data;
+       int nr_triplets, data_size, ret = 0, ret_l6;
+-      int clock = snd_usb_clock_find_source(chip, fp->protocol,
+-                                            fp->clock, false);
++      int clock = snd_usb_clock_find_source(chip, fp, false);
+ 
+       if (clock < 0) {
+               dev_err(&dev->dev,
+diff --git a/sound/usb/mixer.c b/sound/usb/mixer.c
+index 6cd4ff09c5ee..d2a050bb8341 100644
+--- a/sound/usb/mixer.c
++++ b/sound/usb/mixer.c
+@@ -897,6 +897,15 @@ static int parse_term_proc_unit(struct mixer_build *state,
+       return 0;
+ }
+ 
++static int parse_term_effect_unit(struct mixer_build *state,
++                                struct usb_audio_term *term,
++                                void *p1, int id)
++{
++      term->type = UAC3_EFFECT_UNIT << 16; /* virtual type */
++      term->id = id;
++      return 0;
++}
++
+ static int parse_term_uac2_clock_source(struct mixer_build *state,
+                                       struct usb_audio_term *term,
+                                       void *p1, int id)
+@@ -981,8 +990,7 @@ static int __check_input_term(struct mixer_build *state, int id,
+                                                   UAC3_PROCESSING_UNIT);
+               case PTYPE(UAC_VERSION_2, UAC2_EFFECT_UNIT):
+               case PTYPE(UAC_VERSION_3, UAC3_EFFECT_UNIT):
+-                      return parse_term_proc_unit(state, term, p1, id,
+-                                                  UAC3_EFFECT_UNIT);
++                      return parse_term_effect_unit(state, term, p1, id);
+               case PTYPE(UAC_VERSION_1, UAC1_EXTENSION_UNIT):
+               case PTYPE(UAC_VERSION_2, UAC2_EXTENSION_UNIT_V2):
+               case PTYPE(UAC_VERSION_3, UAC3_EXTENSION_UNIT):
+diff --git a/sound/usb/quirks.c b/sound/usb/quirks.c
+index 82184036437b..1ed25b1d2a6a 100644
+--- a/sound/usb/quirks.c
++++ b/sound/usb/quirks.c
+@@ -1402,6 +1402,7 @@ bool snd_usb_get_sample_rate_quirk(struct snd_usb_audio *chip)
+       case USB_ID(0x1395, 0x740a): /* Sennheiser DECT */
+       case USB_ID(0x1901, 0x0191): /* GE B850V3 CP2114 audio interface */
+       case USB_ID(0x21B4, 0x0081): /* AudioQuest DragonFly */
++      case USB_ID(0x2912, 0x30c8): /* Audioengine D1 */
+               return true;
+       }
+ 
+diff --git a/tools/perf/util/stat-shadow.c b/tools/perf/util/stat-shadow.c
+index 2c41d47f6f83..90d23cc3c8d4 100644
+--- a/tools/perf/util/stat-shadow.c
++++ b/tools/perf/util/stat-shadow.c
+@@ -18,7 +18,6 @@
+  * AGGR_NONE: Use matching CPU
+  * AGGR_THREAD: Not supported?
+  */
+-static bool have_frontend_stalled;
+ 
+ struct runtime_stat rt_stat;
+ struct stats walltime_nsecs_stats;
+@@ -144,7 +143,6 @@ void runtime_stat__exit(struct runtime_stat *st)
+ 
+ void perf_stat__init_shadow_stats(void)
+ {
+-      have_frontend_stalled = pmu_have_event("cpu", "stalled-cycles-frontend");
+       runtime_stat__init(&rt_stat);
+ }
+ 
+@@ -853,10 +851,6 @@ void perf_stat__print_shadow_stats(struct perf_stat_config *config,
+                       print_metric(config, ctxp, NULL, "%7.2f ",
+                                       "stalled cycles per insn",
+                                       ratio);
+-              } else if (have_frontend_stalled) {
+-                      out->new_line(config, ctxp);
+-                      print_metric(config, ctxp, NULL, "%7.2f ",
+-                                   "stalled cycles per insn", 0);
+               }
+       } else if (perf_evsel__match(evsel, HARDWARE, HW_BRANCH_MISSES)) {
+               if (runtime_stat_n(st, STAT_BRANCHES, ctx, cpu) != 0)