commit:     003f68bb0343eec1d19eec6a761fae1405ddad26
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Wed Jul 25 10:28:04 2018 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Wed Jul 25 10:28:04 2018 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=003f68bb

Linux patch 4.17.10

 0000_README              |    4 +
 1009_linux-4.17.10.patch | 2457 ++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 2461 insertions(+)

diff --git a/0000_README b/0000_README
index 378d9da..148c985 100644
--- a/0000_README
+++ b/0000_README
@@ -79,6 +79,10 @@ Patch:  1008_linux-4.17.9.patch
 From:   http://www.kernel.org
 Desc:   Linux 4.17.9
 
+Patch:  1009_linux-4.17.10.patch
+From:   http://www.kernel.org
+Desc:   Linux 4.17.10
+
 Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.

diff --git a/1009_linux-4.17.10.patch b/1009_linux-4.17.10.patch
new file mode 100644
index 0000000..dc09395
--- /dev/null
+++ b/1009_linux-4.17.10.patch
@@ -0,0 +1,2457 @@
+diff --git a/Makefile b/Makefile
+index 693fde3aa317..0ab689c38e82 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 4
+ PATCHLEVEL = 17
+-SUBLEVEL = 9
++SUBLEVEL = 10
+ EXTRAVERSION =
+ NAME = Merciless Moray
+ 
+diff --git a/arch/alpha/kernel/osf_sys.c b/arch/alpha/kernel/osf_sys.c
+index 89faa6f4de47..6a92843c0699 100644
+--- a/arch/alpha/kernel/osf_sys.c
++++ b/arch/alpha/kernel/osf_sys.c
+@@ -1183,13 +1183,10 @@ SYSCALL_DEFINE2(osf_getrusage, int, who, struct rusage32 __user *, ru)
+ SYSCALL_DEFINE4(osf_wait4, pid_t, pid, int __user *, ustatus, int, options,
+               struct rusage32 __user *, ur)
+ {
+-      unsigned int status = 0;
+       struct rusage r;
+-      long err = kernel_wait4(pid, &status, options, &r);
++      long err = kernel_wait4(pid, ustatus, options, &r);
+       if (err <= 0)
+               return err;
+-      if (put_user(status, ustatus))
+-              return -EFAULT;
+       if (!ur)
+               return err;
+       if (put_tv_to_tv32(&ur->ru_utime, &r.ru_utime))
+diff --git a/arch/arc/Kconfig b/arch/arc/Kconfig
+index d76bf4a83740..bc0bcf01ec98 100644
+--- a/arch/arc/Kconfig
++++ b/arch/arc/Kconfig
+@@ -408,7 +408,7 @@ config ARC_HAS_DIV_REM
+ 
+ config ARC_HAS_ACCL_REGS
+       bool "Reg Pair ACCL:ACCH (FPU and/or MPY > 6)"
+-      default n
++      default y
+       help
+         Depending on the configuration, CPU can contain accumulator reg-pair
+         (also referred to as r58:r59). These can also be used by gcc as GPR so
+diff --git a/arch/arc/configs/axs101_defconfig b/arch/arc/configs/axs101_defconfig
+index 09f85154c5a4..a635ea972304 100644
+--- a/arch/arc/configs/axs101_defconfig
++++ b/arch/arc/configs/axs101_defconfig
+@@ -11,7 +11,6 @@ CONFIG_NAMESPACES=y
+ # CONFIG_UTS_NS is not set
+ # CONFIG_PID_NS is not set
+ CONFIG_BLK_DEV_INITRD=y
+-CONFIG_INITRAMFS_SOURCE="../arc_initramfs/"
+ CONFIG_EMBEDDED=y
+ CONFIG_PERF_EVENTS=y
+ # CONFIG_VM_EVENT_COUNTERS is not set
+diff --git a/arch/arc/configs/axs103_defconfig b/arch/arc/configs/axs103_defconfig
+index 09fed3ef22b6..aa507e423075 100644
+--- a/arch/arc/configs/axs103_defconfig
++++ b/arch/arc/configs/axs103_defconfig
+@@ -11,7 +11,6 @@ CONFIG_NAMESPACES=y
+ # CONFIG_UTS_NS is not set
+ # CONFIG_PID_NS is not set
+ CONFIG_BLK_DEV_INITRD=y
+-CONFIG_INITRAMFS_SOURCE="../../arc_initramfs_hs/"
+ CONFIG_EMBEDDED=y
+ CONFIG_PERF_EVENTS=y
+ # CONFIG_VM_EVENT_COUNTERS is not set
+diff --git a/arch/arc/configs/axs103_smp_defconfig b/arch/arc/configs/axs103_smp_defconfig
+index ea2f6d817d1a..eba07f468654 100644
+--- a/arch/arc/configs/axs103_smp_defconfig
++++ b/arch/arc/configs/axs103_smp_defconfig
+@@ -11,7 +11,6 @@ CONFIG_NAMESPACES=y
+ # CONFIG_UTS_NS is not set
+ # CONFIG_PID_NS is not set
+ CONFIG_BLK_DEV_INITRD=y
+-CONFIG_INITRAMFS_SOURCE="../../arc_initramfs_hs/"
+ CONFIG_EMBEDDED=y
+ CONFIG_PERF_EVENTS=y
+ # CONFIG_VM_EVENT_COUNTERS is not set
+diff --git a/arch/arc/configs/haps_hs_defconfig b/arch/arc/configs/haps_hs_defconfig
+index ab231c040efe..098b19fbaa51 100644
+--- a/arch/arc/configs/haps_hs_defconfig
++++ b/arch/arc/configs/haps_hs_defconfig
+@@ -11,7 +11,6 @@ CONFIG_NAMESPACES=y
+ # CONFIG_UTS_NS is not set
+ # CONFIG_PID_NS is not set
+ CONFIG_BLK_DEV_INITRD=y
+-CONFIG_INITRAMFS_SOURCE="../../arc_initramfs_hs/"
+ CONFIG_EXPERT=y
+ CONFIG_PERF_EVENTS=y
+ # CONFIG_COMPAT_BRK is not set
+diff --git a/arch/arc/configs/haps_hs_smp_defconfig b/arch/arc/configs/haps_hs_smp_defconfig
+index cf449cbf440d..0104c404d897 100644
+--- a/arch/arc/configs/haps_hs_smp_defconfig
++++ b/arch/arc/configs/haps_hs_smp_defconfig
+@@ -11,7 +11,6 @@ CONFIG_NAMESPACES=y
+ # CONFIG_UTS_NS is not set
+ # CONFIG_PID_NS is not set
+ CONFIG_BLK_DEV_INITRD=y
+-CONFIG_INITRAMFS_SOURCE="../../arc_initramfs_hs/"
+ CONFIG_EMBEDDED=y
+ CONFIG_PERF_EVENTS=y
+ # CONFIG_VM_EVENT_COUNTERS is not set
+diff --git a/arch/arc/configs/hsdk_defconfig b/arch/arc/configs/hsdk_defconfig
+index 1b54c72f4296..6491be0ddbc9 100644
+--- a/arch/arc/configs/hsdk_defconfig
++++ b/arch/arc/configs/hsdk_defconfig
+@@ -9,7 +9,6 @@ CONFIG_NAMESPACES=y
+ # CONFIG_UTS_NS is not set
+ # CONFIG_PID_NS is not set
+ CONFIG_BLK_DEV_INITRD=y
+-CONFIG_INITRAMFS_SOURCE="../../arc_initramfs_hs/"
+ CONFIG_EMBEDDED=y
+ CONFIG_PERF_EVENTS=y
+ # CONFIG_VM_EVENT_COUNTERS is not set
+diff --git a/arch/arc/configs/nsim_700_defconfig b/arch/arc/configs/nsim_700_defconfig
+index 31c2c70b34a1..99e05cf63fca 100644
+--- a/arch/arc/configs/nsim_700_defconfig
++++ b/arch/arc/configs/nsim_700_defconfig
+@@ -11,7 +11,6 @@ CONFIG_NAMESPACES=y
+ # CONFIG_UTS_NS is not set
+ # CONFIG_PID_NS is not set
+ CONFIG_BLK_DEV_INITRD=y
+-CONFIG_INITRAMFS_SOURCE="../arc_initramfs/"
+ CONFIG_KALLSYMS_ALL=y
+ CONFIG_EMBEDDED=y
+ CONFIG_PERF_EVENTS=y
+diff --git a/arch/arc/configs/nsim_hs_defconfig b/arch/arc/configs/nsim_hs_defconfig
+index a578c721d50f..0dc4f9b737e7 100644
+--- a/arch/arc/configs/nsim_hs_defconfig
++++ b/arch/arc/configs/nsim_hs_defconfig
+@@ -11,7 +11,6 @@ CONFIG_NAMESPACES=y
+ # CONFIG_UTS_NS is not set
+ # CONFIG_PID_NS is not set
+ CONFIG_BLK_DEV_INITRD=y
+-CONFIG_INITRAMFS_SOURCE="../../arc_initramfs_hs/"
+ CONFIG_KALLSYMS_ALL=y
+ CONFIG_EMBEDDED=y
+ CONFIG_PERF_EVENTS=y
+diff --git a/arch/arc/configs/nsim_hs_smp_defconfig b/arch/arc/configs/nsim_hs_smp_defconfig
+index 37d7395f3272..be3c30a15e54 100644
+--- a/arch/arc/configs/nsim_hs_smp_defconfig
++++ b/arch/arc/configs/nsim_hs_smp_defconfig
+@@ -9,7 +9,6 @@ CONFIG_NAMESPACES=y
+ # CONFIG_UTS_NS is not set
+ # CONFIG_PID_NS is not set
+ CONFIG_BLK_DEV_INITRD=y
+-CONFIG_INITRAMFS_SOURCE="../arc_initramfs_hs/"
+ CONFIG_KALLSYMS_ALL=y
+ CONFIG_EMBEDDED=y
+ CONFIG_PERF_EVENTS=y
+diff --git a/arch/arc/configs/nsimosci_defconfig b/arch/arc/configs/nsimosci_defconfig
+index 1e1470e2a7f0..3a74b9b21772 100644
+--- a/arch/arc/configs/nsimosci_defconfig
++++ b/arch/arc/configs/nsimosci_defconfig
+@@ -11,7 +11,6 @@ CONFIG_NAMESPACES=y
+ # CONFIG_UTS_NS is not set
+ # CONFIG_PID_NS is not set
+ CONFIG_BLK_DEV_INITRD=y
+-CONFIG_INITRAMFS_SOURCE="../arc_initramfs/"
+ CONFIG_KALLSYMS_ALL=y
+ CONFIG_EMBEDDED=y
+ CONFIG_PERF_EVENTS=y
+diff --git a/arch/arc/configs/nsimosci_hs_defconfig b/arch/arc/configs/nsimosci_hs_defconfig
+index 084a6e42685b..ea2834b4dc1d 100644
+--- a/arch/arc/configs/nsimosci_hs_defconfig
++++ b/arch/arc/configs/nsimosci_hs_defconfig
+@@ -11,7 +11,6 @@ CONFIG_NAMESPACES=y
+ # CONFIG_UTS_NS is not set
+ # CONFIG_PID_NS is not set
+ CONFIG_BLK_DEV_INITRD=y
+-CONFIG_INITRAMFS_SOURCE="../arc_initramfs_hs/"
+ CONFIG_KALLSYMS_ALL=y
+ CONFIG_EMBEDDED=y
+ CONFIG_PERF_EVENTS=y
+diff --git a/arch/arc/configs/nsimosci_hs_smp_defconfig b/arch/arc/configs/nsimosci_hs_smp_defconfig
+index f36d47990415..80a5a1b4924b 100644
+--- a/arch/arc/configs/nsimosci_hs_smp_defconfig
++++ b/arch/arc/configs/nsimosci_hs_smp_defconfig
+@@ -9,7 +9,6 @@ CONFIG_IKCONFIG_PROC=y
+ # CONFIG_UTS_NS is not set
+ # CONFIG_PID_NS is not set
+ CONFIG_BLK_DEV_INITRD=y
+-CONFIG_INITRAMFS_SOURCE="../arc_initramfs_hs/"
+ CONFIG_PERF_EVENTS=y
+ # CONFIG_COMPAT_BRK is not set
+ CONFIG_KPROBES=y
+diff --git a/arch/arc/include/asm/page.h b/arch/arc/include/asm/page.h
+index 109baa06831c..09ddddf71cc5 100644
+--- a/arch/arc/include/asm/page.h
++++ b/arch/arc/include/asm/page.h
+@@ -105,7 +105,7 @@ typedef pte_t * pgtable_t;
+ #define virt_addr_valid(kaddr)  pfn_valid(virt_to_pfn(kaddr))
+ 
+ /* Default Permissions for stack/heaps pages (Non Executable) */
+-#define VM_DATA_DEFAULT_FLAGS   (VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE)
++#define VM_DATA_DEFAULT_FLAGS   (VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
+ 
+ #define WANT_PAGE_VIRTUAL   1
+ 
+diff --git a/arch/arc/include/asm/pgtable.h b/arch/arc/include/asm/pgtable.h
+index 08fe33830d4b..77676e18da69 100644
+--- a/arch/arc/include/asm/pgtable.h
++++ b/arch/arc/include/asm/pgtable.h
+@@ -379,7 +379,7 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long address,
+ 
+ /* Decode a PTE containing swap "identifier "into constituents */
+ #define __swp_type(pte_lookalike)     (((pte_lookalike).val) & 0x1f)
+-#define __swp_offset(pte_lookalike)   ((pte_lookalike).val << 13)
++#define __swp_offset(pte_lookalike)   ((pte_lookalike).val >> 13)
+ 
+ /* NOPs, to keep generic kernel happy */
+ #define __pte_to_swp_entry(pte)       ((swp_entry_t) { pte_val(pte) })
+diff --git a/arch/arc/plat-hsdk/Kconfig b/arch/arc/plat-hsdk/Kconfig
+index 19ab3cf98f0f..fcc9a9e27e9c 100644
+--- a/arch/arc/plat-hsdk/Kconfig
++++ b/arch/arc/plat-hsdk/Kconfig
+@@ -7,5 +7,7 @@
+ 
+ menuconfig ARC_SOC_HSDK
+       bool "ARC HS Development Kit SOC"
++      depends on ISA_ARCV2
++      select ARC_HAS_ACCL_REGS
+       select CLK_HSDK
+       select RESET_HSDK
+diff --git a/arch/powerpc/kernel/idle_book3s.S b/arch/powerpc/kernel/idle_book3s.S
+index e734f6e45abc..689306118b48 100644
+--- a/arch/powerpc/kernel/idle_book3s.S
++++ b/arch/powerpc/kernel/idle_book3s.S
+@@ -144,7 +144,9 @@ power9_restore_additional_sprs:
+       mtspr   SPRN_MMCR1, r4
+ 
+       ld      r3, STOP_MMCR2(r13)
++      ld      r4, PACA_SPRG_VDSO(r13)
+       mtspr   SPRN_MMCR2, r3
++      mtspr   SPRN_SPRG3, r4
+       blr
+ 
+ /*
+diff --git a/arch/x86/events/intel/ds.c b/arch/x86/events/intel/ds.c
+index 8a10a045b57b..8cf03f101938 100644
+--- a/arch/x86/events/intel/ds.c
++++ b/arch/x86/events/intel/ds.c
+@@ -408,9 +408,11 @@ static int alloc_bts_buffer(int cpu)
+       ds->bts_buffer_base = (unsigned long) cea;
+       ds_update_cea(cea, buffer, BTS_BUFFER_SIZE, PAGE_KERNEL);
+       ds->bts_index = ds->bts_buffer_base;
+-      max = BTS_RECORD_SIZE * (BTS_BUFFER_SIZE / BTS_RECORD_SIZE);
+-      ds->bts_absolute_maximum = ds->bts_buffer_base + max;
+-      ds->bts_interrupt_threshold = ds->bts_absolute_maximum - (max / 16);
++      max = BTS_BUFFER_SIZE / BTS_RECORD_SIZE;
++      ds->bts_absolute_maximum = ds->bts_buffer_base +
++                                      max * BTS_RECORD_SIZE;
++      ds->bts_interrupt_threshold = ds->bts_absolute_maximum -
++                                      (max / 16) * BTS_RECORD_SIZE;
+       return 0;
+ }
+ 
+diff --git a/arch/x86/include/asm/apm.h b/arch/x86/include/asm/apm.h
+index c356098b6fb9..4d4015ddcf26 100644
+--- a/arch/x86/include/asm/apm.h
++++ b/arch/x86/include/asm/apm.h
+@@ -7,8 +7,6 @@
+ #ifndef _ASM_X86_MACH_DEFAULT_APM_H
+ #define _ASM_X86_MACH_DEFAULT_APM_H
+ 
+-#include <asm/nospec-branch.h>
+-
+ #ifdef APM_ZERO_SEGS
+ #     define APM_DO_ZERO_SEGS \
+               "pushl %%ds\n\t" \
+@@ -34,7 +32,6 @@ static inline void apm_bios_call_asm(u32 func, u32 ebx_in, u32 ecx_in,
+        * N.B. We do NOT need a cld after the BIOS call
+        * because we always save and restore the flags.
+        */
+-      firmware_restrict_branch_speculation_start();
+       __asm__ __volatile__(APM_DO_ZERO_SEGS
+               "pushl %%edi\n\t"
+               "pushl %%ebp\n\t"
+@@ -47,7 +44,6 @@ static inline bool apm_bios_call_simple_asm(u32 func, u32 ebx_in,
+                 "=S" (*esi)
+               : "a" (func), "b" (ebx_in), "c" (ecx_in)
+               : "memory", "cc");
+-      firmware_restrict_branch_speculation_end();
+ }
+ 
+ static inline bool apm_bios_call_simple_asm(u32 func, u32 ebx_in,
+@@ -60,7 +56,6 @@ static inline bool apm_bios_call_simple_asm(u32 func, u32 ebx_in,
+        * N.B. We do NOT need a cld after the BIOS call
+        * because we always save and restore the flags.
+        */
+-      firmware_restrict_branch_speculation_start();
+       __asm__ __volatile__(APM_DO_ZERO_SEGS
+               "pushl %%edi\n\t"
+               "pushl %%ebp\n\t"
+@@ -73,7 +68,6 @@ static inline bool apm_bios_call_simple_asm(u32 func, u32 ebx_in,
+                 "=S" (si)
+               : "a" (func), "b" (ebx_in), "c" (ecx_in)
+               : "memory", "cc");
+-      firmware_restrict_branch_speculation_end();
+       return error;
+ }
+ 
+diff --git a/arch/x86/kernel/apm_32.c b/arch/x86/kernel/apm_32.c
+index dfcbe6924eaf..3dd661dcc3f7 100644
+--- a/arch/x86/kernel/apm_32.c
++++ b/arch/x86/kernel/apm_32.c
+@@ -240,6 +240,7 @@
+ #include <asm/olpc.h>
+ #include <asm/paravirt.h>
+ #include <asm/reboot.h>
++#include <asm/nospec-branch.h>
+ 
+ #if defined(CONFIG_APM_DISPLAY_BLANK) && defined(CONFIG_VT)
+ extern int (*console_blank_hook)(int);
+@@ -614,11 +615,13 @@ static long __apm_bios_call(void *_call)
+       gdt[0x40 / 8] = bad_bios_desc;
+ 
+       apm_irq_save(flags);
++      firmware_restrict_branch_speculation_start();
+       APM_DO_SAVE_SEGS;
+       apm_bios_call_asm(call->func, call->ebx, call->ecx,
+                         &call->eax, &call->ebx, &call->ecx, &call->edx,
+                         &call->esi);
+       APM_DO_RESTORE_SEGS;
++      firmware_restrict_branch_speculation_end();
+       apm_irq_restore(flags);
+       gdt[0x40 / 8] = save_desc_40;
+       put_cpu();
+@@ -690,10 +693,12 @@ static long __apm_bios_call_simple(void *_call)
+       gdt[0x40 / 8] = bad_bios_desc;
+ 
+       apm_irq_save(flags);
++      firmware_restrict_branch_speculation_start();
+       APM_DO_SAVE_SEGS;
+       error = apm_bios_call_simple_asm(call->func, call->ebx, call->ecx,
+                                        &call->eax);
+       APM_DO_RESTORE_SEGS;
++      firmware_restrict_branch_speculation_end();
+       apm_irq_restore(flags);
+       gdt[0x40 / 8] = save_desc_40;
+       put_cpu();
+diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c
+index 6f7eda9d5297..79ae1423b619 100644
+--- a/arch/x86/kernel/cpu/mcheck/mce.c
++++ b/arch/x86/kernel/cpu/mcheck/mce.c
+@@ -2147,9 +2147,6 @@ static ssize_t store_int_with_restart(struct device *s,
+       if (check_interval == old_check_interval)
+               return ret;
+ 
+-      if (check_interval < 1)
+-              check_interval = 1;
+-
+       mutex_lock(&mce_sysfs_mutex);
+       mce_restart();
+       mutex_unlock(&mce_sysfs_mutex);
+diff --git a/arch/x86/kernel/kvmclock.c b/arch/x86/kernel/kvmclock.c
+index 8b26c9e01cc4..d79a18b4cf9d 100644
+--- a/arch/x86/kernel/kvmclock.c
++++ b/arch/x86/kernel/kvmclock.c
+@@ -319,6 +319,8 @@ void __init kvmclock_init(void)
+       printk(KERN_INFO "kvm-clock: Using msrs %x and %x",
+               msr_kvm_system_time, msr_kvm_wall_clock);
+ 
++      pvclock_set_pvti_cpu0_va(hv_clock);
++
+       if (kvm_para_has_feature(KVM_FEATURE_CLOCKSOURCE_STABLE_BIT))
+               pvclock_set_flags(PVCLOCK_TSC_STABLE_BIT);
+ 
+@@ -366,14 +368,11 @@ int __init kvm_setup_vsyscall_timeinfo(void)
+       vcpu_time = &hv_clock[cpu].pvti;
+       flags = pvclock_read_flags(vcpu_time);
+ 
+-      if (!(flags & PVCLOCK_TSC_STABLE_BIT)) {
+-              put_cpu();
+-              return 1;
+-      }
+-
+-      pvclock_set_pvti_cpu0_va(hv_clock);
+       put_cpu();
+ 
++      if (!(flags & PVCLOCK_TSC_STABLE_BIT))
++              return 1;
++
+       kvm_clock.archdata.vclock_mode = VCLOCK_PVCLOCK;
+ #endif
+       return 0;
+diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
+index dd4366edc200..a3bbac8ef4d0 100644
+--- a/arch/x86/kvm/vmx.c
++++ b/arch/x86/kvm/vmx.c
+@@ -2376,6 +2376,7 @@ static void vmx_save_host_state(struct kvm_vcpu *vcpu)
+       struct vcpu_vmx *vmx = to_vmx(vcpu);
+ #ifdef CONFIG_X86_64
+       int cpu = raw_smp_processor_id();
++      unsigned long fs_base, kernel_gs_base;
+ #endif
+       int i;
+ 
+@@ -2391,12 +2392,20 @@ static void vmx_save_host_state(struct kvm_vcpu *vcpu)
+       vmx->host_state.gs_ldt_reload_needed = vmx->host_state.ldt_sel;
+ 
+ #ifdef CONFIG_X86_64
+-      save_fsgs_for_kvm();
+-      vmx->host_state.fs_sel = current->thread.fsindex;
+-      vmx->host_state.gs_sel = current->thread.gsindex;
+-#else
+-      savesegment(fs, vmx->host_state.fs_sel);
+-      savesegment(gs, vmx->host_state.gs_sel);
++      if (likely(is_64bit_mm(current->mm))) {
++              save_fsgs_for_kvm();
++              vmx->host_state.fs_sel = current->thread.fsindex;
++              vmx->host_state.gs_sel = current->thread.gsindex;
++              fs_base = current->thread.fsbase;
++              kernel_gs_base = current->thread.gsbase;
++      } else {
++#endif
++              savesegment(fs, vmx->host_state.fs_sel);
++              savesegment(gs, vmx->host_state.gs_sel);
++#ifdef CONFIG_X86_64
++              fs_base = read_msr(MSR_FS_BASE);
++              kernel_gs_base = read_msr(MSR_KERNEL_GS_BASE);
++      }
+ #endif
+       if (!(vmx->host_state.fs_sel & 7)) {
+               vmcs_write16(HOST_FS_SELECTOR, vmx->host_state.fs_sel);
+@@ -2416,10 +2425,10 @@ static void vmx_save_host_state(struct kvm_vcpu *vcpu)
+       savesegment(ds, vmx->host_state.ds_sel);
+       savesegment(es, vmx->host_state.es_sel);
+ 
+-      vmcs_writel(HOST_FS_BASE, current->thread.fsbase);
++      vmcs_writel(HOST_FS_BASE, fs_base);
+       vmcs_writel(HOST_GS_BASE, cpu_kernelmode_gs_base(cpu));
+ 
+-      vmx->msr_host_kernel_gs_base = current->thread.gsbase;
++      vmx->msr_host_kernel_gs_base = kernel_gs_base;
+       if (is_long_mode(&vmx->vcpu))
+               wrmsrl(MSR_KERNEL_GS_BASE, vmx->msr_guest_kernel_gs_base);
+ #else
+@@ -4110,11 +4119,7 @@ static __init int setup_vmcs_config(struct vmcs_config *vmcs_conf)
+       vmcs_conf->order = get_order(vmcs_conf->size);
+       vmcs_conf->basic_cap = vmx_msr_high & ~0x1fff;
+ 
+-      /* KVM supports Enlightened VMCS v1 only */
+-      if (static_branch_unlikely(&enable_evmcs))
+-              vmcs_conf->revision_id = KVM_EVMCS_VERSION;
+-      else
+-              vmcs_conf->revision_id = vmx_msr_low;
++      vmcs_conf->revision_id = vmx_msr_low;
+ 
+       vmcs_conf->pin_based_exec_ctrl = _pin_based_exec_control;
+       vmcs_conf->cpu_based_exec_ctrl = _cpu_based_exec_control;
+@@ -4184,7 +4189,13 @@ static struct vmcs *alloc_vmcs_cpu(int cpu)
+               return NULL;
+       vmcs = page_address(pages);
+       memset(vmcs, 0, vmcs_config.size);
+-      vmcs->revision_id = vmcs_config.revision_id; /* vmcs revision id */
++
++      /* KVM supports Enlightened VMCS v1 only */
++      if (static_branch_unlikely(&enable_evmcs))
++              vmcs->revision_id = KVM_EVMCS_VERSION;
++      else
++              vmcs->revision_id = vmcs_config.revision_id;
++
+       return vmcs;
+ }
+ 
+@@ -4343,6 +4354,19 @@ static __init int alloc_kvm_area(void)
+                       return -ENOMEM;
+               }
+ 
++              /*
++               * When eVMCS is enabled, alloc_vmcs_cpu() sets
++               * vmcs->revision_id to KVM_EVMCS_VERSION instead of
++               * revision_id reported by MSR_IA32_VMX_BASIC.
++               *
++               * However, even though not explictly documented by
++               * TLFS, VMXArea passed as VMXON argument should
++               * still be marked with revision_id reported by
++               * physical CPU.
++               */
++              if (static_branch_unlikely(&enable_evmcs))
++                      vmcs->revision_id = vmcs_config.revision_id;
++
+               per_cpu(vmxarea, cpu) = vmcs;
+       }
+       return 0;
+diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
+index bd3f0a9d5e60..b357f81bfba6 100644
+--- a/drivers/cpufreq/intel_pstate.c
++++ b/drivers/cpufreq/intel_pstate.c
+@@ -2179,6 +2179,18 @@ static bool __init intel_pstate_no_acpi_pss(void)
+       return true;
+ }
+ 
++static bool __init intel_pstate_no_acpi_pcch(void)
++{
++      acpi_status status;
++      acpi_handle handle;
++
++      status = acpi_get_handle(NULL, "\\_SB", &handle);
++      if (ACPI_FAILURE(status))
++              return true;
++
++      return !acpi_has_method(handle, "PCCH");
++}
++
+ static bool __init intel_pstate_has_acpi_ppc(void)
+ {
+       int i;
+@@ -2238,7 +2250,10 @@ static bool __init intel_pstate_platform_pwr_mgmt_exists(void)
+ 
+       switch (plat_info[idx].data) {
+       case PSS:
+-              return intel_pstate_no_acpi_pss();
++              if (!intel_pstate_no_acpi_pss())
++                      return false;
++
++              return intel_pstate_no_acpi_pcch();
+       case PPC:
+               return intel_pstate_has_acpi_ppc() && !force_load;
+       }
+diff --git a/drivers/cpufreq/pcc-cpufreq.c b/drivers/cpufreq/pcc-cpufreq.c
+index 3f0ce2ae35ee..0c56c9759672 100644
+--- a/drivers/cpufreq/pcc-cpufreq.c
++++ b/drivers/cpufreq/pcc-cpufreq.c
+@@ -580,6 +580,10 @@ static int __init pcc_cpufreq_init(void)
+ {
+       int ret;
+ 
++      /* Skip initialization if another cpufreq driver is there. */
++      if (cpufreq_get_current_driver())
++              return 0;
++
+       if (acpi_disabled)
+               return 0;
+ 
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+index dc34b50e6b29..b11e9659e312 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+@@ -925,6 +925,10 @@ static int amdgpu_cs_ib_vm_chunk(struct amdgpu_device *adev,
+               r = amdgpu_bo_vm_update_pte(p);
+               if (r)
+                       return r;
++
++              r = reservation_object_reserve_shared(vm->root.base.bo->tbo.resv);
++              if (r)
++                      return r;
+       }
+ 
+       return amdgpu_cs_sync_rings(p);
+diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
+index 4304d9e408b8..ace9ad578ca0 100644
+--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
+@@ -83,22 +83,21 @@ static ssize_t dm_dp_aux_transfer(struct drm_dp_aux *aux,
+       enum i2c_mot_mode mot = (msg->request & DP_AUX_I2C_MOT) ?
+               I2C_MOT_TRUE : I2C_MOT_FALSE;
+       enum ddc_result res;
+-      uint32_t read_bytes = msg->size;
++      ssize_t read_bytes;
+ 
+       if (WARN_ON(msg->size > 16))
+               return -E2BIG;
+ 
+       switch (msg->request & ~DP_AUX_I2C_MOT) {
+       case DP_AUX_NATIVE_READ:
+-              res = dal_ddc_service_read_dpcd_data(
++              read_bytes = dal_ddc_service_read_dpcd_data(
+                               TO_DM_AUX(aux)->ddc_service,
+                               false,
+                               I2C_MOT_UNDEF,
+                               msg->address,
+                               msg->buffer,
+-                              msg->size,
+-                              &read_bytes);
+-              break;
++                              msg->size);
++              return read_bytes;
+       case DP_AUX_NATIVE_WRITE:
+               res = dal_ddc_service_write_dpcd_data(
+                               TO_DM_AUX(aux)->ddc_service,
+@@ -109,15 +108,14 @@ static ssize_t dm_dp_aux_transfer(struct drm_dp_aux *aux,
+                               msg->size);
+               break;
+       case DP_AUX_I2C_READ:
+-              res = dal_ddc_service_read_dpcd_data(
++              read_bytes = dal_ddc_service_read_dpcd_data(
+                               TO_DM_AUX(aux)->ddc_service,
+                               true,
+                               mot,
+                               msg->address,
+                               msg->buffer,
+-                              msg->size,
+-                              &read_bytes);
+-              break;
++                              msg->size);
++              return read_bytes;
+       case DP_AUX_I2C_WRITE:
+               res = dal_ddc_service_write_dpcd_data(
+                               TO_DM_AUX(aux)->ddc_service,
+@@ -139,9 +137,7 @@ static ssize_t dm_dp_aux_transfer(struct drm_dp_aux *aux,
+                r == DDC_RESULT_SUCESSFULL);
+ #endif
+ 
+-      if (res != DDC_RESULT_SUCESSFULL)
+-              return -EIO;
+-      return read_bytes;
++      return msg->size;
+ }
+ 
+ static enum drm_connector_status
+diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c
+index ae48d603ebd6..49c2face1e7a 100644
+--- a/drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c
++++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c
+@@ -629,14 +629,13 @@ bool dal_ddc_service_query_ddc_data(
+       return ret;
+ }
+ 
+-enum ddc_result dal_ddc_service_read_dpcd_data(
++ssize_t dal_ddc_service_read_dpcd_data(
+       struct ddc_service *ddc,
+       bool i2c,
+       enum i2c_mot_mode mot,
+       uint32_t address,
+       uint8_t *data,
+-      uint32_t len,
+-      uint32_t *read)
++      uint32_t len)
+ {
+       struct aux_payload read_payload = {
+               .i2c_over_aux = i2c,
+@@ -653,8 +652,6 @@ enum ddc_result dal_ddc_service_read_dpcd_data(
+               .mot = mot
+       };
+ 
+-      *read = 0;
+-
+       if (len > DEFAULT_AUX_MAX_DATA_SIZE) {
+               BREAK_TO_DEBUGGER();
+               return DDC_RESULT_FAILED_INVALID_OPERATION;
+@@ -664,8 +661,7 @@ enum ddc_result dal_ddc_service_read_dpcd_data(
+               ddc->ctx->i2caux,
+               ddc->ddc_pin,
+               &command)) {
+-              *read = command.payloads->length;
+-              return DDC_RESULT_SUCESSFULL;
++              return (ssize_t)command.payloads->length;
+       }
+ 
+       return DDC_RESULT_FAILED_OPERATION;
+diff --git a/drivers/gpu/drm/amd/display/dc/inc/dc_link_ddc.h b/drivers/gpu/drm/amd/display/dc/inc/dc_link_ddc.h
+index 30b3a08b91be..090b7a8dd67b 100644
+--- a/drivers/gpu/drm/amd/display/dc/inc/dc_link_ddc.h
++++ b/drivers/gpu/drm/amd/display/dc/inc/dc_link_ddc.h
+@@ -102,14 +102,13 @@ bool dal_ddc_service_query_ddc_data(
+               uint8_t *read_buf,
+               uint32_t read_size);
+ 
+-enum ddc_result dal_ddc_service_read_dpcd_data(
++ssize_t dal_ddc_service_read_dpcd_data(
+               struct ddc_service *ddc,
+               bool i2c,
+               enum i2c_mot_mode mot,
+               uint32_t address,
+               uint8_t *data,
+-              uint32_t len,
+-              uint32_t *read);
++              uint32_t len);
+ 
+ enum ddc_result dal_ddc_service_write_dpcd_data(
+               struct ddc_service *ddc,
+diff --git a/drivers/gpu/drm/drm_lease.c b/drivers/gpu/drm/drm_lease.c
+index d345563fdff3..ce281d651ae8 100644
+--- a/drivers/gpu/drm/drm_lease.c
++++ b/drivers/gpu/drm/drm_lease.c
+@@ -553,24 +553,13 @@ int drm_mode_create_lease_ioctl(struct drm_device *dev,
+ 
+       /* Clone the lessor file to create a new file for us */
+       DRM_DEBUG_LEASE("Allocating lease file\n");
+-      path_get(&lessor_file->f_path);
+-      lessee_file = alloc_file(&lessor_file->f_path,
+-                               lessor_file->f_mode,
+-                               fops_get(lessor_file->f_inode->i_fop));
+-
++      lessee_file = filp_clone_open(lessor_file);
+       if (IS_ERR(lessee_file)) {
+               ret = PTR_ERR(lessee_file);
+               goto out_lessee;
+       }
+ 
+-      /* Initialize the new file for DRM */
+-      DRM_DEBUG_LEASE("Initializing the file with %p\n", lessee_file->f_op->open);
+-      ret = lessee_file->f_op->open(lessee_file->f_inode, lessee_file);
+-      if (ret)
+-              goto out_lessee_file;
+-
+       lessee_priv = lessee_file->private_data;
+-
+       /* Change the file to a master one */
+       drm_master_put(&lessee_priv->master);
+       lessee_priv->master = lessee;
+@@ -588,9 +577,6 @@ int drm_mode_create_lease_ioctl(struct drm_device *dev,
+       DRM_DEBUG_LEASE("drm_mode_create_lease_ioctl succeeded\n");
+       return 0;
+ 
+-out_lessee_file:
+-      fput(lessee_file);
+-
+ out_lessee:
+       drm_master_put(&lessee);
+ 
+diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
+index b25cc5aa8fbe..d793b2bbd6c2 100644
+--- a/drivers/gpu/drm/i915/i915_irq.c
++++ b/drivers/gpu/drm/i915/i915_irq.c
+@@ -1967,10 +1967,38 @@ static void valleyview_pipestat_irq_handler(struct drm_i915_private *dev_priv,
+ 
+ static u32 i9xx_hpd_irq_ack(struct drm_i915_private *dev_priv)
+ {
+-      u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT);
++      u32 hotplug_status = 0, hotplug_status_mask;
++      int i;
++
++      if (IS_G4X(dev_priv) ||
++          IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
++              hotplug_status_mask = HOTPLUG_INT_STATUS_G4X |
++                      DP_AUX_CHANNEL_MASK_INT_STATUS_G4X;
++      else
++              hotplug_status_mask = HOTPLUG_INT_STATUS_I915;
+ 
+-      if (hotplug_status)
++      /*
++       * We absolutely have to clear all the pending interrupt
++       * bits in PORT_HOTPLUG_STAT. Otherwise the ISR port
++       * interrupt bit won't have an edge, and the i965/g4x
++       * edge triggered IIR will not notice that an interrupt
++       * is still pending. We can't use PORT_HOTPLUG_EN to
++       * guarantee the edge as the act of toggling the enable
++       * bits can itself generate a new hotplug interrupt :(
++       */
++      for (i = 0; i < 10; i++) {
++              u32 tmp = I915_READ(PORT_HOTPLUG_STAT) & hotplug_status_mask;
++
++              if (tmp == 0)
++                      return hotplug_status;
++
++              hotplug_status |= tmp;
+               I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
++      }
++
++      WARN_ONCE(1,
++                "PORT_HOTPLUG_STAT did not clear (0x%08x)\n",
++                I915_READ(PORT_HOTPLUG_STAT));
+ 
+       return hotplug_status;
+ }
+diff --git a/drivers/gpu/drm/nouveau/nouveau_backlight.c b/drivers/gpu/drm/nouveau/nouveau_backlight.c
+index debbbf0fd4bd..408b955e5c39 100644
+--- a/drivers/gpu/drm/nouveau/nouveau_backlight.c
++++ b/drivers/gpu/drm/nouveau/nouveau_backlight.c
+@@ -267,6 +267,7 @@ nouveau_backlight_init(struct drm_device *dev)
+       struct nouveau_drm *drm = nouveau_drm(dev);
+       struct nvif_device *device = &drm->client.device;
+       struct drm_connector *connector;
++      struct drm_connector_list_iter conn_iter;
+ 
+       INIT_LIST_HEAD(&drm->bl_connectors);
+ 
+@@ -275,7 +276,8 @@ nouveau_backlight_init(struct drm_device *dev)
+               return 0;
+       }
+ 
+-      list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
++      drm_connector_list_iter_begin(dev, &conn_iter);
++      drm_for_each_connector_iter(connector, &conn_iter) {
+               if (connector->connector_type != DRM_MODE_CONNECTOR_LVDS &&
+                   connector->connector_type != DRM_MODE_CONNECTOR_eDP)
+                       continue;
+@@ -292,7 +294,7 @@ nouveau_backlight_init(struct drm_device *dev)
+                       break;
+               }
+       }
+-
++      drm_connector_list_iter_end(&conn_iter);
+ 
+       return 0;
+ }
+diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.c b/drivers/gpu/drm/nouveau/nouveau_connector.c
+index 6ed9cb053dfa..359fecce8cc0 100644
+--- a/drivers/gpu/drm/nouveau/nouveau_connector.c
++++ b/drivers/gpu/drm/nouveau/nouveau_connector.c
+@@ -1208,14 +1208,19 @@ nouveau_connector_create(struct drm_device *dev, int index)
+       struct nouveau_display *disp = nouveau_display(dev);
+       struct nouveau_connector *nv_connector = NULL;
+       struct drm_connector *connector;
++      struct drm_connector_list_iter conn_iter;
+       int type, ret = 0;
+       bool dummy;
+ 
+-      list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
++      drm_connector_list_iter_begin(dev, &conn_iter);
++      nouveau_for_each_non_mst_connector_iter(connector, &conn_iter) {
+               nv_connector = nouveau_connector(connector);
+-              if (nv_connector->index == index)
++              if (nv_connector->index == index) {
++                      drm_connector_list_iter_end(&conn_iter);
+                       return connector;
++              }
+       }
++      drm_connector_list_iter_end(&conn_iter);
+ 
+       nv_connector = kzalloc(sizeof(*nv_connector), GFP_KERNEL);
+       if (!nv_connector)
+diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.h b/drivers/gpu/drm/nouveau/nouveau_connector.h
+index a4d1a059bd3d..dc7454e7f19a 100644
+--- a/drivers/gpu/drm/nouveau/nouveau_connector.h
++++ b/drivers/gpu/drm/nouveau/nouveau_connector.h
+@@ -33,6 +33,7 @@
+ #include <drm/drm_encoder.h>
+ #include <drm/drm_dp_helper.h>
+ #include "nouveau_crtc.h"
++#include "nouveau_encoder.h"
+ 
+ struct nvkm_i2c_port;
+ 
+@@ -60,19 +61,46 @@ static inline struct nouveau_connector *nouveau_connector(
+       return container_of(con, struct nouveau_connector, base);
+ }
+ 
++static inline bool
++nouveau_connector_is_mst(struct drm_connector *connector)
++{
++      const struct nouveau_encoder *nv_encoder;
++      const struct drm_encoder *encoder;
++
++      if (connector->connector_type != DRM_MODE_CONNECTOR_DisplayPort)
++              return false;
++
++      nv_encoder = find_encoder(connector, DCB_OUTPUT_ANY);
++      if (!nv_encoder)
++              return false;
++
++      encoder = &nv_encoder->base.base;
++      return encoder->encoder_type == DRM_MODE_ENCODER_DPMST;
++}
++
++#define nouveau_for_each_non_mst_connector_iter(connector, iter) \
++      drm_for_each_connector_iter(connector, iter) \
++              for_each_if(!nouveau_connector_is_mst(connector))
++
+ static inline struct nouveau_connector *
+ nouveau_crtc_connector_get(struct nouveau_crtc *nv_crtc)
+ {
+       struct drm_device *dev = nv_crtc->base.dev;
+       struct drm_connector *connector;
++      struct drm_connector_list_iter conn_iter;
++      struct nouveau_connector *nv_connector = NULL;
+       struct drm_crtc *crtc = to_drm_crtc(nv_crtc);
+ 
+-      list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+-              if (connector->encoder && connector->encoder->crtc == crtc)
+-                      return nouveau_connector(connector);
++      drm_connector_list_iter_begin(dev, &conn_iter);
++      nouveau_for_each_non_mst_connector_iter(connector, &conn_iter) {
++              if (connector->encoder && connector->encoder->crtc == crtc) {
++                      nv_connector = nouveau_connector(connector);
++                      break;
++              }
+       }
++      drm_connector_list_iter_end(&conn_iter);
+ 
+-      return NULL;
++      return nv_connector;
+ }
+ 
+ struct drm_connector *
+diff --git a/drivers/gpu/drm/nouveau/nouveau_display.c b/drivers/gpu/drm/nouveau/nouveau_display.c
+index 009713404cc4..4cba117e81fc 100644
+--- a/drivers/gpu/drm/nouveau/nouveau_display.c
++++ b/drivers/gpu/drm/nouveau/nouveau_display.c
+@@ -406,6 +406,7 @@ nouveau_display_init(struct drm_device *dev)
+       struct nouveau_display *disp = nouveau_display(dev);
+       struct nouveau_drm *drm = nouveau_drm(dev);
+       struct drm_connector *connector;
++      struct drm_connector_list_iter conn_iter;
+       int ret;
+ 
+       ret = disp->init(dev);
+@@ -413,10 +414,12 @@ nouveau_display_init(struct drm_device *dev)
+               return ret;
+ 
+       /* enable hotplug interrupts */
+-      list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
++      drm_connector_list_iter_begin(dev, &conn_iter);
++      nouveau_for_each_non_mst_connector_iter(connector, &conn_iter) {
+               struct nouveau_connector *conn = nouveau_connector(connector);
+               nvif_notify_get(&conn->hpd);
+       }
++      drm_connector_list_iter_end(&conn_iter);
+ 
+       /* enable flip completion events */
+       nvif_notify_get(&drm->flip);
+@@ -429,6 +432,7 @@ nouveau_display_fini(struct drm_device *dev, bool suspend)
+       struct nouveau_display *disp = nouveau_display(dev);
+       struct nouveau_drm *drm = nouveau_drm(dev);
+       struct drm_connector *connector;
++      struct drm_connector_list_iter conn_iter;
+ 
+       if (!suspend) {
+               if (drm_drv_uses_atomic_modeset(dev))
+@@ -441,10 +445,12 @@ nouveau_display_fini(struct drm_device *dev, bool suspend)
+       nvif_notify_put(&drm->flip);
+ 
+       /* disable hotplug interrupts */
+-      list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
++      drm_connector_list_iter_begin(dev, &conn_iter);
++      nouveau_for_each_non_mst_connector_iter(connector, &conn_iter) {
+               struct nouveau_connector *conn = nouveau_connector(connector);
+               nvif_notify_put(&conn->hpd);
+       }
++      drm_connector_list_iter_end(&conn_iter);
+ 
+       drm_kms_helper_poll_disable(dev);
+       disp->fini(dev);
+diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.c b/drivers/gpu/drm/nouveau/nouveau_drm.c
+index bbbf353682e1..0bffeb95b072 100644
+--- a/drivers/gpu/drm/nouveau/nouveau_drm.c
++++ b/drivers/gpu/drm/nouveau/nouveau_drm.c
+@@ -866,22 +866,11 @@ nouveau_pmops_runtime_resume(struct device *dev)
+ static int
+ nouveau_pmops_runtime_idle(struct device *dev)
+ {
+-      struct pci_dev *pdev = to_pci_dev(dev);
+-      struct drm_device *drm_dev = pci_get_drvdata(pdev);
+-      struct nouveau_drm *drm = nouveau_drm(drm_dev);
+-      struct drm_crtc *crtc;
+-
+       if (!nouveau_pmops_runtime()) {
+               pm_runtime_forbid(dev);
+               return -EBUSY;
+       }
+ 
+-      list_for_each_entry(crtc, &drm->dev->mode_config.crtc_list, head) {
+-              if (crtc->enabled) {
+-                      DRM_DEBUG_DRIVER("failing to power off - crtc active\n");
+-                      return -EBUSY;
+-              }
+-      }
+       pm_runtime_mark_last_busy(dev);
+       pm_runtime_autosuspend(dev);
+       /* we don't want the main rpm_idle to call suspend - we want to autosuspend */
+diff --git a/drivers/misc/cxl/api.c b/drivers/misc/cxl/api.c
+index 753b1a698fc4..6b16946f9b05 100644
+--- a/drivers/misc/cxl/api.c
++++ b/drivers/misc/cxl/api.c
+@@ -103,15 +103,15 @@ static struct file *cxl_getfile(const char *name,
+       d_instantiate(path.dentry, inode);
+ 
+       file = alloc_file(&path, OPEN_FMODE(flags), fops);
+-      if (IS_ERR(file))
+-              goto err_dput;
++      if (IS_ERR(file)) {
++              path_put(&path);
++              goto err_fs;
++      }
+       file->f_flags = flags & (O_ACCMODE | O_NONBLOCK);
+       file->private_data = priv;
+ 
+       return file;
+ 
+-err_dput:
+-      path_put(&path);
+ err_inode:
+       iput(inode);
+ err_fs:
+diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_cfg.h b/drivers/net/ethernet/aquantia/atlantic/aq_cfg.h
+index fc7383106946..91eb8910b1c9 100644
+--- a/drivers/net/ethernet/aquantia/atlantic/aq_cfg.h
++++ b/drivers/net/ethernet/aquantia/atlantic/aq_cfg.h
+@@ -63,8 +63,6 @@
+ 
+ #define AQ_CFG_NAPI_WEIGHT     64U
+ 
+-#define AQ_CFG_MULTICAST_ADDRESS_MAX     32U
+-
+ /*#define AQ_CFG_MAC_ADDR_PERMANENT {0x30, 0x0E, 0xE3, 0x12, 0x34, 0x56}*/
+ 
+ #define AQ_NIC_FC_OFF    0U
+diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_hw.h b/drivers/net/ethernet/aquantia/atlantic/aq_hw.h
+index a2d416b24ffc..2c6ebd91a9f2 100644
+--- a/drivers/net/ethernet/aquantia/atlantic/aq_hw.h
++++ b/drivers/net/ethernet/aquantia/atlantic/aq_hw.h
+@@ -98,6 +98,8 @@ struct aq_stats_s {
+ #define AQ_HW_MEDIA_TYPE_TP    1U
+ #define AQ_HW_MEDIA_TYPE_FIBRE 2U
+ 
++#define AQ_HW_MULTICAST_ADDRESS_MAX     32U
++
+ struct aq_hw_s {
+       atomic_t flags;
+       u8 rbl_enabled:1;
+@@ -177,7 +179,7 @@ struct aq_hw_ops {
+                                   unsigned int packet_filter);
+ 
+       int (*hw_multicast_list_set)(struct aq_hw_s *self,
+-                                   u8 ar_mac[AQ_CFG_MULTICAST_ADDRESS_MAX]
++                                   u8 ar_mac[AQ_HW_MULTICAST_ADDRESS_MAX]
+                                    [ETH_ALEN],
+                                    u32 count);
+ 
+diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_main.c b/drivers/net/ethernet/aquantia/atlantic/aq_main.c
+index ba5fe8c4125d..e3ae29e523f0 100644
+--- a/drivers/net/ethernet/aquantia/atlantic/aq_main.c
++++ b/drivers/net/ethernet/aquantia/atlantic/aq_main.c
+@@ -135,17 +135,10 @@ static int aq_ndev_set_mac_address(struct net_device *ndev, void *addr)
+ static void aq_ndev_set_multicast_settings(struct net_device *ndev)
+ {
+       struct aq_nic_s *aq_nic = netdev_priv(ndev);
+-      int err = 0;
+ 
+-      err = aq_nic_set_packet_filter(aq_nic, ndev->flags);
+-      if (err < 0)
+-              return;
++      aq_nic_set_packet_filter(aq_nic, ndev->flags);
+ 
+-      if (netdev_mc_count(ndev)) {
+-              err = aq_nic_set_multicast_list(aq_nic, ndev);
+-              if (err < 0)
+-                      return;
+-      }
++      aq_nic_set_multicast_list(aq_nic, ndev);
+ }
+ 
+ static const struct net_device_ops aq_ndev_ops = {
+diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_nic.c b/drivers/net/ethernet/aquantia/atlantic/aq_nic.c
+index 1a1a6380c128..7a22d0257e04 100644
+--- a/drivers/net/ethernet/aquantia/atlantic/aq_nic.c
++++ b/drivers/net/ethernet/aquantia/atlantic/aq_nic.c
+@@ -563,34 +563,41 @@ int aq_nic_set_packet_filter(struct aq_nic_s *self, unsigned int flags)
+ 
+ int aq_nic_set_multicast_list(struct aq_nic_s *self, struct net_device *ndev)
+ {
++      unsigned int packet_filter = self->packet_filter;
+       struct netdev_hw_addr *ha = NULL;
+       unsigned int i = 0U;
+ 
+-      self->mc_list.count = 0U;
+-
+-      netdev_for_each_mc_addr(ha, ndev) {
+-              ether_addr_copy(self->mc_list.ar[i++], ha->addr);
+-              ++self->mc_list.count;
++      self->mc_list.count = 0;
++      if (netdev_uc_count(ndev) > AQ_HW_MULTICAST_ADDRESS_MAX) {
++              packet_filter |= IFF_PROMISC;
++      } else {
++              netdev_for_each_uc_addr(ha, ndev) {
++                      ether_addr_copy(self->mc_list.ar[i++], ha->addr);
+ 
+-              if (i >= AQ_CFG_MULTICAST_ADDRESS_MAX)
+-                      break;
++                      if (i >= AQ_HW_MULTICAST_ADDRESS_MAX)
++                              break;
++              }
+       }
+ 
+-      if (i >= AQ_CFG_MULTICAST_ADDRESS_MAX) {
+-              /* Number of filters is too big: atlantic does not support this.
+-               * Force all multi filter to support this.
+-               * With this we disable all UC filters and setup "all pass"
+-               * multicast mask
+-               */
+-              self->packet_filter |= IFF_ALLMULTI;
+-              self->aq_nic_cfg.mc_list_count = 0;
+-              return self->aq_hw_ops->hw_packet_filter_set(self->aq_hw,
+-                                                           self->packet_filter);
++      if (i + netdev_mc_count(ndev) > AQ_HW_MULTICAST_ADDRESS_MAX) {
++              packet_filter |= IFF_ALLMULTI;
+       } else {
+-              return self->aq_hw_ops->hw_multicast_list_set(self->aq_hw,
+-                                                  self->mc_list.ar,
+-                                                  self->mc_list.count);
++              netdev_for_each_mc_addr(ha, ndev) {
++                      ether_addr_copy(self->mc_list.ar[i++], ha->addr);
++
++                      if (i >= AQ_HW_MULTICAST_ADDRESS_MAX)
++                              break;
++              }
++      }
++
++      if (i > 0 && i < AQ_HW_MULTICAST_ADDRESS_MAX) {
++              packet_filter |= IFF_MULTICAST;
++              self->mc_list.count = i;
++              self->aq_hw_ops->hw_multicast_list_set(self->aq_hw,
++                                                     self->mc_list.ar,
++                                                     self->mc_list.count);
+       }
++      return aq_nic_set_packet_filter(self, packet_filter);
+ }
+ 
+ int aq_nic_set_mtu(struct aq_nic_s *self, int new_mtu)
+diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_nic.h b/drivers/net/ethernet/aquantia/atlantic/aq_nic.h
+index faa533a0ec47..fecfc401f95d 100644
+--- a/drivers/net/ethernet/aquantia/atlantic/aq_nic.h
++++ b/drivers/net/ethernet/aquantia/atlantic/aq_nic.h
+@@ -75,7 +75,7 @@ struct aq_nic_s {
+       struct aq_hw_link_status_s link_status;
+       struct {
+               u32 count;
+-              u8 ar[AQ_CFG_MULTICAST_ADDRESS_MAX][ETH_ALEN];
++              u8 ar[AQ_HW_MULTICAST_ADDRESS_MAX][ETH_ALEN];
+       } mc_list;
+ 
+       struct pci_dev *pdev;
+diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c
+index 67e2f9fb9402..8cc6abadc03b 100644
+--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c
++++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c
+@@ -765,7 +765,7 @@ static int hw_atl_a0_hw_packet_filter_set(struct aq_hw_s *self,
+ 
+ static int hw_atl_a0_hw_multicast_list_set(struct aq_hw_s *self,
+                                          u8 ar_mac
+-                                         [AQ_CFG_MULTICAST_ADDRESS_MAX]
++                                         [AQ_HW_MULTICAST_ADDRESS_MAX]
+                                          [ETH_ALEN],
+                                          u32 count)
+ {
+diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c
+index 819f6bcf9b4e..956860a69797 100644
+--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c
++++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c
+@@ -784,7 +784,7 @@ static int hw_atl_b0_hw_packet_filter_set(struct aq_hw_s *self,
+ 
+ static int hw_atl_b0_hw_multicast_list_set(struct aq_hw_s *self,
+                                          u8 ar_mac
+-                                         [AQ_CFG_MULTICAST_ADDRESS_MAX]
++                                         [AQ_HW_MULTICAST_ADDRESS_MAX]
+                                          [ETH_ALEN],
+                                          u32 count)
+ {
+@@ -812,7 +812,7 @@ static int hw_atl_b0_hw_multicast_list_set(struct aq_hw_s *self,
+ 
+               hw_atl_rpfl2_uc_flr_en_set(self,
+                                          (self->aq_nic_cfg->is_mc_list_enabled),
+-                                  HW_ATL_B0_MAC_MIN + i);
++                                         HW_ATL_B0_MAC_MIN + i);
+       }
+ 
+       err = aq_hw_err_from_flags(self);
+diff --git a/drivers/net/ethernet/broadcom/bcmsysport.c b/drivers/net/ethernet/broadcom/bcmsysport.c
+index f33b25fbca63..7db072fe5f22 100644
+--- a/drivers/net/ethernet/broadcom/bcmsysport.c
++++ b/drivers/net/ethernet/broadcom/bcmsysport.c
+@@ -1946,8 +1946,8 @@ static int bcm_sysport_open(struct net_device *dev)
+       if (!priv->is_lite)
+               priv->crc_fwd = !!(umac_readl(priv, UMAC_CMD) & CMD_CRC_FWD);
+       else
+-              priv->crc_fwd = !!(gib_readl(priv, GIB_CONTROL) &
+-                                 GIB_FCS_STRIP);
++              priv->crc_fwd = !((gib_readl(priv, GIB_CONTROL) &
++                                GIB_FCS_STRIP) >> GIB_FCS_STRIP_SHIFT);
+ 
+       phydev = of_phy_connect(dev, priv->phy_dn, bcm_sysport_adj_link,
+                               0, priv->phy_interface);
+diff --git a/drivers/net/ethernet/broadcom/bcmsysport.h b/drivers/net/ethernet/broadcom/bcmsysport.h
+index d6e5d0cbf3a3..cf440b91fd04 100644
+--- a/drivers/net/ethernet/broadcom/bcmsysport.h
++++ b/drivers/net/ethernet/broadcom/bcmsysport.h
+@@ -278,7 +278,8 @@ struct bcm_rsb {
+ #define  GIB_GTX_CLK_EXT_CLK          (0 << GIB_GTX_CLK_SEL_SHIFT)
+ #define  GIB_GTX_CLK_125MHZ           (1 << GIB_GTX_CLK_SEL_SHIFT)
+ #define  GIB_GTX_CLK_250MHZ           (2 << GIB_GTX_CLK_SEL_SHIFT)
+-#define  GIB_FCS_STRIP                        (1 << 6)
++#define  GIB_FCS_STRIP_SHIFT          6
++#define  GIB_FCS_STRIP                        (1 << GIB_FCS_STRIP_SHIFT)
+ #define  GIB_LCL_LOOP_EN              (1 << 7)
+ #define  GIB_LCL_LOOP_TXEN            (1 << 8)
+ #define  GIB_RMT_LOOP_EN              (1 << 9)
+diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
+index 9f59b1270a7c..3e0e7f18ecf9 100644
+--- a/drivers/net/ethernet/broadcom/tg3.c
++++ b/drivers/net/ethernet/broadcom/tg3.c
+@@ -9289,6 +9289,15 @@ static int tg3_chip_reset(struct tg3 *tp)
+ 
+       tg3_restore_clk(tp);
+ 
++      /* Increase the core clock speed to fix tx timeout issue for 5762
++       * with 100Mbps link speed.
++       */
++      if (tg3_asic_rev(tp) == ASIC_REV_5762) {
++              val = tr32(TG3_CPMU_CLCK_ORIDE_ENABLE);
++              tw32(TG3_CPMU_CLCK_ORIDE_ENABLE, val |
++                   TG3_CPMU_MAC_ORIDE_ENABLE);
++      }
++
+       /* Reprobe ASF enable state.  */
+       tg3_flag_clear(tp, ENABLE_ASF);
+       tp->phy_flags &= ~(TG3_PHYFLG_1G_ON_VAUX_OK |
+diff --git a/drivers/net/ethernet/mellanox/mlx4/en_rx.c b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
+index 5c613c6663da..2ca0f1dad54c 100644
+--- a/drivers/net/ethernet/mellanox/mlx4/en_rx.c
++++ b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
+@@ -474,10 +474,10 @@ static int mlx4_en_complete_rx_desc(struct mlx4_en_priv *priv,
+ {
+       const struct mlx4_en_frag_info *frag_info = priv->frag_info;
+       unsigned int truesize = 0;
++      bool release = true;
+       int nr, frag_size;
+       struct page *page;
+       dma_addr_t dma;
+-      bool release;
+ 
+       /* Collect used fragments while replacing them in the HW descriptors */
+       for (nr = 0;; frags++) {
+@@ -500,7 +500,11 @@ static int mlx4_en_complete_rx_desc(struct mlx4_en_priv *priv,
+                       release = page_count(page) != 1 ||
+                                 page_is_pfmemalloc(page) ||
+                                 page_to_nid(page) != numa_mem_id();
+-              } else {
++              } else if (!priv->rx_headroom) {
++                      /* rx_headroom for non XDP setup is always 0.
++                       * When XDP is set, the above condition will
++                       * guarantee page is always released.
++                       */
+                       u32 sz_align = ALIGN(frag_size, SMP_CACHE_BYTES);
+ 
+                       frags->page_offset += sz_align;
+diff --git a/drivers/net/hyperv/netvsc.c b/drivers/net/hyperv/netvsc.c
+index c418113c6b20..c10ca3c20b36 100644
+--- a/drivers/net/hyperv/netvsc.c
++++ b/drivers/net/hyperv/netvsc.c
+@@ -1291,6 +1291,7 @@ int netvsc_poll(struct napi_struct *napi, int budget)
+       struct hv_device *device = netvsc_channel_to_device(channel);
+       struct net_device *ndev = hv_get_drvdata(device);
+       int work_done = 0;
++      int ret;
+ 
+       /* If starting a new interval */
+       if (!nvchan->desc)
+@@ -1302,16 +1303,18 @@ int netvsc_poll(struct napi_struct *napi, int budget)
+               nvchan->desc = hv_pkt_iter_next(channel, nvchan->desc);
+       }
+ 
+-      /* If send of pending receive completions suceeded
+-       *   and did not exhaust NAPI budget this time
+-       *   and not doing busy poll
++      /* Send any pending receive completions */
++      ret = send_recv_completions(ndev, net_device, nvchan);
++
++      /* If it did not exhaust NAPI budget this time
++       *  and not doing busy poll
+        * then re-enable host interrupts
+-       *     and reschedule if ring is not empty.
++       *  and reschedule if ring is not empty
++       *   or sending receive completion failed.
+        */
+-      if (send_recv_completions(ndev, net_device, nvchan) == 0 &&
+-          work_done < budget &&
++      if (work_done < budget &&
+           napi_complete_done(napi, work_done) &&
+-          hv_end_read(&channel->inbound) &&
++          (ret || hv_end_read(&channel->inbound)) &&
+           napi_schedule_prep(napi)) {
+               hv_begin_read(&channel->inbound);
+               __napi_schedule(napi);
+diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c
+index 9e4ba8e80a18..5aa081fda447 100644
+--- a/drivers/net/phy/phy_device.c
++++ b/drivers/net/phy/phy_device.c
+@@ -1720,11 +1720,8 @@ EXPORT_SYMBOL(genphy_loopback);
+ 
+ static int __set_phy_supported(struct phy_device *phydev, u32 max_speed)
+ {
+-      /* The default values for phydev->supported are provided by the PHY
+-       * driver "features" member, we want to reset to sane defaults first
+-       * before supporting higher speeds.
+-       */
+-      phydev->supported &= PHY_DEFAULT_FEATURES;
++      phydev->supported &= ~(PHY_1000BT_FEATURES | PHY_100BT_FEATURES |
++                             PHY_10BT_FEATURES);
+ 
+       switch (max_speed) {
+       default:
+diff --git a/drivers/net/usb/asix_devices.c b/drivers/net/usb/asix_devices.c
+index 3d4f7959dabb..b1b3d8f7e67d 100644
+--- a/drivers/net/usb/asix_devices.c
++++ b/drivers/net/usb/asix_devices.c
+@@ -642,10 +642,12 @@ static void ax88772_restore_phy(struct usbnet *dev)
+                                    priv->presvd_phy_advertise);
+ 
+               /* Restore BMCR */
++              if (priv->presvd_phy_bmcr & BMCR_ANENABLE)
++                      priv->presvd_phy_bmcr |= BMCR_ANRESTART;
++
+               asix_mdio_write_nopm(dev->net, dev->mii.phy_id, MII_BMCR,
+                                    priv->presvd_phy_bmcr);
+ 
+-              mii_nway_restart(&dev->mii);
+               priv->presvd_phy_advertise = 0;
+               priv->presvd_phy_bmcr = 0;
+       }
+diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
+index 04c22f508ed9..f8f90d77cf0f 100644
+--- a/drivers/net/usb/qmi_wwan.c
++++ b/drivers/net/usb/qmi_wwan.c
+@@ -1253,6 +1253,7 @@ static const struct usb_device_id products[] = {
+       {QMI_FIXED_INTF(0x1e0e, 0x9001, 5)},    /* SIMCom 7230E */
+       {QMI_QUIRK_SET_DTR(0x2c7c, 0x0125, 4)}, /* Quectel EC25, EC20 R2.0 Mini PCIe */
+       {QMI_QUIRK_SET_DTR(0x2c7c, 0x0121, 4)}, /* Quectel EC21 Mini PCIe */
++      {QMI_QUIRK_SET_DTR(0x2c7c, 0x0191, 4)}, /* Quectel EG91 */
+       {QMI_FIXED_INTF(0x2c7c, 0x0296, 4)},    /* Quectel BG96 */
+       {QMI_QUIRK_SET_DTR(0x2c7c, 0x0306, 4)}, /* Quectel EP06 Mini PCIe */
+ 
+diff --git a/drivers/ptp/ptp_chardev.c b/drivers/ptp/ptp_chardev.c
+index 767c485af59b..522719b494f3 100644
+--- a/drivers/ptp/ptp_chardev.c
++++ b/drivers/ptp/ptp_chardev.c
+@@ -89,6 +89,7 @@ int ptp_set_pinfunc(struct ptp_clock *ptp, unsigned int pin,
+       case PTP_PF_PHYSYNC:
+               if (chan != 0)
+                       return -EINVAL;
++              break;
+       default:
+               return -EINVAL;
+       }
+diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h
+index eb2ec1fb07cb..209de7cd9358 100644
+--- a/drivers/scsi/qla2xxx/qla_def.h
++++ b/drivers/scsi/qla2xxx/qla_def.h
+@@ -361,6 +361,8 @@ struct ct_arg {
+       dma_addr_t      rsp_dma;
+       u32             req_size;
+       u32             rsp_size;
++      u32             req_allocated_size;
++      u32             rsp_allocated_size;
+       void            *req;
+       void            *rsp;
+       port_id_t       id;
+diff --git a/drivers/scsi/qla2xxx/qla_gs.c b/drivers/scsi/qla2xxx/qla_gs.c
+index 05abe5aaab7f..cbfbab5d9a59 100644
+--- a/drivers/scsi/qla2xxx/qla_gs.c
++++ b/drivers/scsi/qla2xxx/qla_gs.c
+@@ -556,7 +556,7 @@ static void qla2x00_async_sns_sp_done(void *s, int rc)
+               /* please ignore kernel warning. otherwise, we have mem leak. */
+               if (sp->u.iocb_cmd.u.ctarg.req) {
+                       dma_free_coherent(&vha->hw->pdev->dev,
+-                          sizeof(struct ct_sns_pkt),
++                          sp->u.iocb_cmd.u.ctarg.req_allocated_size,
+                           sp->u.iocb_cmd.u.ctarg.req,
+                           sp->u.iocb_cmd.u.ctarg.req_dma);
+                       sp->u.iocb_cmd.u.ctarg.req = NULL;
+@@ -564,7 +564,7 @@ static void qla2x00_async_sns_sp_done(void *s, int rc)
+ 
+               if (sp->u.iocb_cmd.u.ctarg.rsp) {
+                       dma_free_coherent(&vha->hw->pdev->dev,
+-                          sizeof(struct ct_sns_pkt),
++                          sp->u.iocb_cmd.u.ctarg.rsp_allocated_size,
+                           sp->u.iocb_cmd.u.ctarg.rsp,
+                           sp->u.iocb_cmd.u.ctarg.rsp_dma);
+                       sp->u.iocb_cmd.u.ctarg.rsp = NULL;
+@@ -617,6 +617,7 @@ static int qla_async_rftid(scsi_qla_host_t *vha, port_id_t *d_id)
+       sp->u.iocb_cmd.u.ctarg.req = dma_alloc_coherent(&vha->hw->pdev->dev,
+           sizeof(struct ct_sns_pkt), &sp->u.iocb_cmd.u.ctarg.req_dma,
+           GFP_KERNEL);
++      sp->u.iocb_cmd.u.ctarg.req_allocated_size = sizeof(struct ct_sns_pkt);
+       if (!sp->u.iocb_cmd.u.ctarg.req) {
+               ql_log(ql_log_warn, vha, 0xd041,
+                   "%s: Failed to allocate ct_sns request.\n",
+@@ -627,6 +628,7 @@ static int qla_async_rftid(scsi_qla_host_t *vha, port_id_t *d_id)
+       sp->u.iocb_cmd.u.ctarg.rsp = dma_alloc_coherent(&vha->hw->pdev->dev,
+           sizeof(struct ct_sns_pkt), &sp->u.iocb_cmd.u.ctarg.rsp_dma,
+           GFP_KERNEL);
++      sp->u.iocb_cmd.u.ctarg.rsp_allocated_size = sizeof(struct ct_sns_pkt);
+       if (!sp->u.iocb_cmd.u.ctarg.rsp) {
+               ql_log(ql_log_warn, vha, 0xd042,
+                   "%s: Failed to allocate ct_sns request.\n",
+@@ -712,6 +714,7 @@ static int qla_async_rffid(scsi_qla_host_t *vha, port_id_t *d_id,
+       sp->u.iocb_cmd.u.ctarg.req = dma_alloc_coherent(&vha->hw->pdev->dev,
+           sizeof(struct ct_sns_pkt), &sp->u.iocb_cmd.u.ctarg.req_dma,
+           GFP_KERNEL);
++      sp->u.iocb_cmd.u.ctarg.req_allocated_size = sizeof(struct ct_sns_pkt);
+       if (!sp->u.iocb_cmd.u.ctarg.req) {
+               ql_log(ql_log_warn, vha, 0xd041,
+                   "%s: Failed to allocate ct_sns request.\n",
+@@ -722,6 +725,7 @@ static int qla_async_rffid(scsi_qla_host_t *vha, port_id_t *d_id,
+       sp->u.iocb_cmd.u.ctarg.rsp = dma_alloc_coherent(&vha->hw->pdev->dev,
+           sizeof(struct ct_sns_pkt), &sp->u.iocb_cmd.u.ctarg.rsp_dma,
+           GFP_KERNEL);
++      sp->u.iocb_cmd.u.ctarg.rsp_allocated_size = sizeof(struct ct_sns_pkt);
+       if (!sp->u.iocb_cmd.u.ctarg.rsp) {
+               ql_log(ql_log_warn, vha, 0xd042,
+                   "%s: Failed to allocate ct_sns request.\n",
+@@ -802,6 +806,7 @@ static int qla_async_rnnid(scsi_qla_host_t *vha, port_id_t *d_id,
+       sp->u.iocb_cmd.u.ctarg.req = dma_alloc_coherent(&vha->hw->pdev->dev,
+           sizeof(struct ct_sns_pkt), &sp->u.iocb_cmd.u.ctarg.req_dma,
+           GFP_KERNEL);
++      sp->u.iocb_cmd.u.ctarg.req_allocated_size = sizeof(struct ct_sns_pkt);
+       if (!sp->u.iocb_cmd.u.ctarg.req) {
+               ql_log(ql_log_warn, vha, 0xd041,
+                   "%s: Failed to allocate ct_sns request.\n",
+@@ -812,6 +817,7 @@ static int qla_async_rnnid(scsi_qla_host_t *vha, port_id_t *d_id,
+       sp->u.iocb_cmd.u.ctarg.rsp = dma_alloc_coherent(&vha->hw->pdev->dev,
+           sizeof(struct ct_sns_pkt), &sp->u.iocb_cmd.u.ctarg.rsp_dma,
+           GFP_KERNEL);
++      sp->u.iocb_cmd.u.ctarg.rsp_allocated_size = sizeof(struct ct_sns_pkt);
+       if (!sp->u.iocb_cmd.u.ctarg.rsp) {
+               ql_log(ql_log_warn, vha, 0xd042,
+                   "%s: Failed to allocate ct_sns request.\n",
+@@ -909,6 +915,7 @@ static int qla_async_rsnn_nn(scsi_qla_host_t *vha)
+       sp->u.iocb_cmd.u.ctarg.req = dma_alloc_coherent(&vha->hw->pdev->dev,
+           sizeof(struct ct_sns_pkt), &sp->u.iocb_cmd.u.ctarg.req_dma,
+           GFP_KERNEL);
++      sp->u.iocb_cmd.u.ctarg.req_allocated_size = sizeof(struct ct_sns_pkt);
+       if (!sp->u.iocb_cmd.u.ctarg.req) {
+               ql_log(ql_log_warn, vha, 0xd041,
+                   "%s: Failed to allocate ct_sns request.\n",
+@@ -919,6 +926,7 @@ static int qla_async_rsnn_nn(scsi_qla_host_t *vha)
+       sp->u.iocb_cmd.u.ctarg.rsp = dma_alloc_coherent(&vha->hw->pdev->dev,
+           sizeof(struct ct_sns_pkt), &sp->u.iocb_cmd.u.ctarg.rsp_dma,
+           GFP_KERNEL);
++      sp->u.iocb_cmd.u.ctarg.rsp_allocated_size = sizeof(struct ct_sns_pkt);
+       if (!sp->u.iocb_cmd.u.ctarg.rsp) {
+               ql_log(ql_log_warn, vha, 0xd042,
+                   "%s: Failed to allocate ct_sns request.\n",
+@@ -3392,14 +3400,14 @@ void qla24xx_sp_unmap(scsi_qla_host_t *vha, srb_t *sp)
+ {
+       if (sp->u.iocb_cmd.u.ctarg.req) {
+               dma_free_coherent(&vha->hw->pdev->dev,
+-                      sizeof(struct ct_sns_pkt),
++                      sp->u.iocb_cmd.u.ctarg.req_allocated_size,
+                       sp->u.iocb_cmd.u.ctarg.req,
+                       sp->u.iocb_cmd.u.ctarg.req_dma);
+               sp->u.iocb_cmd.u.ctarg.req = NULL;
+       }
+       if (sp->u.iocb_cmd.u.ctarg.rsp) {
+               dma_free_coherent(&vha->hw->pdev->dev,
+-                      sizeof(struct ct_sns_pkt),
++                      sp->u.iocb_cmd.u.ctarg.rsp_allocated_size,
+                       sp->u.iocb_cmd.u.ctarg.rsp,
+                       sp->u.iocb_cmd.u.ctarg.rsp_dma);
+               sp->u.iocb_cmd.u.ctarg.rsp = NULL;
+@@ -3600,14 +3608,14 @@ static void qla2x00_async_gpnid_sp_done(void *s, int res)
+               /* please ignore kernel warning. otherwise, we have mem leak. */
+               if (sp->u.iocb_cmd.u.ctarg.req) {
+                       dma_free_coherent(&vha->hw->pdev->dev,
+-                              sizeof(struct ct_sns_pkt),
++                              sp->u.iocb_cmd.u.ctarg.req_allocated_size,
+                               sp->u.iocb_cmd.u.ctarg.req,
+                               sp->u.iocb_cmd.u.ctarg.req_dma);
+                       sp->u.iocb_cmd.u.ctarg.req = NULL;
+               }
+               if (sp->u.iocb_cmd.u.ctarg.rsp) {
+                       dma_free_coherent(&vha->hw->pdev->dev,
+-                              sizeof(struct ct_sns_pkt),
++                              sp->u.iocb_cmd.u.ctarg.rsp_allocated_size,
+                               sp->u.iocb_cmd.u.ctarg.rsp,
+                               sp->u.iocb_cmd.u.ctarg.rsp_dma);
+                       sp->u.iocb_cmd.u.ctarg.rsp = NULL;
+@@ -3658,6 +3666,7 @@ int qla24xx_async_gpnid(scsi_qla_host_t *vha, port_id_t *id)
+       sp->u.iocb_cmd.u.ctarg.req = dma_alloc_coherent(&vha->hw->pdev->dev,
+               sizeof(struct ct_sns_pkt), &sp->u.iocb_cmd.u.ctarg.req_dma,
+               GFP_KERNEL);
++      sp->u.iocb_cmd.u.ctarg.req_allocated_size = sizeof(struct ct_sns_pkt);
+       if (!sp->u.iocb_cmd.u.ctarg.req) {
+               ql_log(ql_log_warn, vha, 0xd041,
+                   "Failed to allocate ct_sns request.\n");
+@@ -3667,6 +3676,7 @@ int qla24xx_async_gpnid(scsi_qla_host_t *vha, port_id_t *id)
+       sp->u.iocb_cmd.u.ctarg.rsp = dma_alloc_coherent(&vha->hw->pdev->dev,
+               sizeof(struct ct_sns_pkt), &sp->u.iocb_cmd.u.ctarg.rsp_dma,
+               GFP_KERNEL);
++      sp->u.iocb_cmd.u.ctarg.rsp_allocated_size = sizeof(struct ct_sns_pkt);
+       if (!sp->u.iocb_cmd.u.ctarg.rsp) {
+               ql_log(ql_log_warn, vha, 0xd042,
+                   "Failed to allocate ct_sns request.\n");
+@@ -4125,14 +4135,14 @@ static void qla2x00_async_gpnft_gnnft_sp_done(void *s, int res)
+                        */
+                       if (sp->u.iocb_cmd.u.ctarg.req) {
+                               dma_free_coherent(&vha->hw->pdev->dev,
+-                                  sizeof(struct ct_sns_pkt),
++                                  sp->u.iocb_cmd.u.ctarg.req_allocated_size,
+                                   sp->u.iocb_cmd.u.ctarg.req,
+                                   sp->u.iocb_cmd.u.ctarg.req_dma);
+                               sp->u.iocb_cmd.u.ctarg.req = NULL;
+                       }
+                       if (sp->u.iocb_cmd.u.ctarg.rsp) {
+                               dma_free_coherent(&vha->hw->pdev->dev,
+-                                  sizeof(struct ct_sns_pkt),
++                                  sp->u.iocb_cmd.u.ctarg.rsp_allocated_size,
+                                   sp->u.iocb_cmd.u.ctarg.rsp,
+                                   sp->u.iocb_cmd.u.ctarg.rsp_dma);
+                               sp->u.iocb_cmd.u.ctarg.rsp = NULL;
+@@ -4162,14 +4172,14 @@ static void qla2x00_async_gpnft_gnnft_sp_done(void *s, int res)
+               /* please ignore kernel warning. Otherwise, we have mem leak. */
+               if (sp->u.iocb_cmd.u.ctarg.req) {
+                       dma_free_coherent(&vha->hw->pdev->dev,
+-                          sizeof(struct ct_sns_pkt),
++                          sp->u.iocb_cmd.u.ctarg.req_allocated_size,
+                           sp->u.iocb_cmd.u.ctarg.req,
+                           sp->u.iocb_cmd.u.ctarg.req_dma);
+                       sp->u.iocb_cmd.u.ctarg.req = NULL;
+               }
+               if (sp->u.iocb_cmd.u.ctarg.rsp) {
+                       dma_free_coherent(&vha->hw->pdev->dev,
+-                          sizeof(struct ct_sns_pkt),
++                          sp->u.iocb_cmd.u.ctarg.rsp_allocated_size,
+                           sp->u.iocb_cmd.u.ctarg.rsp,
+                           sp->u.iocb_cmd.u.ctarg.rsp_dma);
+                       sp->u.iocb_cmd.u.ctarg.rsp = NULL;
+@@ -4264,14 +4274,14 @@ static int qla24xx_async_gnnft(scsi_qla_host_t *vha, struct srb *sp,
+ done_free_sp:
+       if (sp->u.iocb_cmd.u.ctarg.req) {
+               dma_free_coherent(&vha->hw->pdev->dev,
+-                  sizeof(struct ct_sns_pkt),
++                  sp->u.iocb_cmd.u.ctarg.req_allocated_size,
+                   sp->u.iocb_cmd.u.ctarg.req,
+                   sp->u.iocb_cmd.u.ctarg.req_dma);
+               sp->u.iocb_cmd.u.ctarg.req = NULL;
+       }
+       if (sp->u.iocb_cmd.u.ctarg.rsp) {
+               dma_free_coherent(&vha->hw->pdev->dev,
+-                  sizeof(struct ct_sns_pkt),
++                  sp->u.iocb_cmd.u.ctarg.rsp_allocated_size,
+                   sp->u.iocb_cmd.u.ctarg.rsp,
+                   sp->u.iocb_cmd.u.ctarg.rsp_dma);
+               sp->u.iocb_cmd.u.ctarg.rsp = NULL;
+@@ -4332,6 +4342,7 @@ int qla24xx_async_gpnft(scsi_qla_host_t *vha, u8 fc4_type, srb_t *sp)
+               sp->u.iocb_cmd.u.ctarg.req = dma_zalloc_coherent(
+                       &vha->hw->pdev->dev, sizeof(struct ct_sns_pkt),
+                       &sp->u.iocb_cmd.u.ctarg.req_dma, GFP_KERNEL);
++              sp->u.iocb_cmd.u.ctarg.req_allocated_size = sizeof(struct ct_sns_pkt);
+               if (!sp->u.iocb_cmd.u.ctarg.req) {
+                       ql_log(ql_log_warn, vha, 0xffff,
+                           "Failed to allocate ct_sns request.\n");
+@@ -4349,6 +4360,7 @@ int qla24xx_async_gpnft(scsi_qla_host_t *vha, u8 fc4_type, srb_t *sp)
+               sp->u.iocb_cmd.u.ctarg.rsp = dma_zalloc_coherent(
+                       &vha->hw->pdev->dev, rspsz,
+                       &sp->u.iocb_cmd.u.ctarg.rsp_dma, GFP_KERNEL);
++              sp->u.iocb_cmd.u.ctarg.rsp_allocated_size = sizeof(struct ct_sns_pkt);
+               if (!sp->u.iocb_cmd.u.ctarg.rsp) {
+                       ql_log(ql_log_warn, vha, 0xffff,
+                           "Failed to allocate ct_sns request.\n");
+@@ -4408,14 +4420,14 @@ int qla24xx_async_gpnft(scsi_qla_host_t *vha, u8 fc4_type, srb_t *sp)
+ done_free_sp:
+       if (sp->u.iocb_cmd.u.ctarg.req) {
+               dma_free_coherent(&vha->hw->pdev->dev,
+-                  sizeof(struct ct_sns_pkt),
++                  sp->u.iocb_cmd.u.ctarg.req_allocated_size,
+                   sp->u.iocb_cmd.u.ctarg.req,
+                   sp->u.iocb_cmd.u.ctarg.req_dma);
+               sp->u.iocb_cmd.u.ctarg.req = NULL;
+       }
+       if (sp->u.iocb_cmd.u.ctarg.rsp) {
+               dma_free_coherent(&vha->hw->pdev->dev,
+-                  sizeof(struct ct_sns_pkt),
++                  sp->u.iocb_cmd.u.ctarg.rsp_allocated_size,
+                   sp->u.iocb_cmd.u.ctarg.rsp,
+                   sp->u.iocb_cmd.u.ctarg.rsp_dma);
+               sp->u.iocb_cmd.u.ctarg.rsp = NULL;
+diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
+index 636960ad029a..0cb552268be3 100644
+--- a/drivers/scsi/qla2xxx/qla_init.c
++++ b/drivers/scsi/qla2xxx/qla_init.c
+@@ -591,12 +591,14 @@ static void qla24xx_handle_gnl_done_event(scsi_qla_host_t *vha,
+                               conflict_fcport =
+                                       qla2x00_find_fcport_by_wwpn(vha,
+                                           e->port_name, 0);
+-                              ql_dbg(ql_dbg_disc, vha, 0x20e6,
+-                                  "%s %d %8phC post del sess\n",
+-                                  __func__, __LINE__,
+-                                  conflict_fcport->port_name);
+-                              qlt_schedule_sess_for_deletion
+-                                      (conflict_fcport);
++                              if (conflict_fcport) {
++                                      qlt_schedule_sess_for_deletion
++                                              (conflict_fcport);
++                                      ql_dbg(ql_dbg_disc, vha, 0x20e6,
++                                          "%s %d %8phC post del sess\n",
++                                          __func__, __LINE__,
++                                          conflict_fcport->port_name);
++                              }
+                       }
+ 
+                       /* FW already picked this loop id for another fcport */
+diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
+index 15eaa6dded04..2b0816dfe9bd 100644
+--- a/drivers/scsi/qla2xxx/qla_os.c
++++ b/drivers/scsi/qla2xxx/qla_os.c
+@@ -3180,6 +3180,8 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
+           "req->req_q_in=%p req->req_q_out=%p rsp->rsp_q_in=%p rsp->rsp_q_out=%p.\n",
+           req->req_q_in, req->req_q_out, rsp->rsp_q_in, rsp->rsp_q_out);
+ 
++      ha->wq = alloc_workqueue("qla2xxx_wq", 0, 0);
++
+       if (ha->isp_ops->initialize_adapter(base_vha)) {
+               ql_log(ql_log_fatal, base_vha, 0x00d6,
+                   "Failed to initialize adapter - Adapter flags %x.\n",
+@@ -3216,8 +3218,6 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
+           host->can_queue, base_vha->req,
+           base_vha->mgmt_svr_loop_id, host->sg_tablesize);
+ 
+-      ha->wq = alloc_workqueue("qla2xxx_wq", 0, 0);
+-
+       if (ha->mqenable) {
+               bool mq = false;
+               bool startit = false;
+diff --git a/drivers/scsi/sd_zbc.c b/drivers/scsi/sd_zbc.c
+index 210407cd2341..da868f6c9638 100644
+--- a/drivers/scsi/sd_zbc.c
++++ b/drivers/scsi/sd_zbc.c
+@@ -401,7 +401,8 @@ static int sd_zbc_check_capacity(struct scsi_disk *sdkp, unsigned char *buf)
+  * Check that all zones of the device are equal. The last zone can however
+  * be smaller. The zone size must also be a power of two number of LBAs.
+  *
+- * Returns the zone size in bytes upon success or an error code upon failure.
++ * Returns the zone size in number of blocks upon success or an error code
++ * upon failure.
+  */
+ static s64 sd_zbc_check_zone_size(struct scsi_disk *sdkp)
+ {
+@@ -411,7 +412,7 @@ static s64 sd_zbc_check_zone_size(struct scsi_disk *sdkp)
+       unsigned char *rec;
+       unsigned int buf_len;
+       unsigned int list_length;
+-      int ret;
++      s64 ret;
+       u8 same;
+ 
+       /* Get a buffer */
+diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
+index 711da3306b14..61c3dc2f3be5 100644
+--- a/drivers/usb/host/xhci.c
++++ b/drivers/usb/host/xhci.c
+@@ -844,6 +844,41 @@ static void xhci_disable_port_wake_on_bits(struct xhci_hcd *xhci)
+       spin_unlock_irqrestore(&xhci->lock, flags);
+ }
+ 
++static bool xhci_pending_portevent(struct xhci_hcd *xhci)
++{
++      __le32 __iomem          **port_array;
++      int                     port_index;
++      u32                     status;
++      u32                     portsc;
++
++      status = readl(&xhci->op_regs->status);
++      if (status & STS_EINT)
++              return true;
++      /*
++       * Checking STS_EINT is not enough as there is a lag between a change
++       * bit being set and the Port Status Change Event that it generated
++       * being written to the Event Ring. See note in xhci 1.1 section 4.19.2.
++       */
++
++      port_index = xhci->num_usb2_ports;
++      port_array = xhci->usb2_ports;
++      while (port_index--) {
++              portsc = readl(port_array[port_index]);
++              if (portsc & PORT_CHANGE_MASK ||
++                  (portsc & PORT_PLS_MASK) == XDEV_RESUME)
++                      return true;
++      }
++      port_index = xhci->num_usb3_ports;
++      port_array = xhci->usb3_ports;
++      while (port_index--) {
++              portsc = readl(port_array[port_index]);
++              if (portsc & PORT_CHANGE_MASK ||
++                  (portsc & PORT_PLS_MASK) == XDEV_RESUME)
++                      return true;
++      }
++      return false;
++}
++
+ /*
+  * Stop HC (not bus-specific)
+  *
+@@ -945,7 +980,7 @@ EXPORT_SYMBOL_GPL(xhci_suspend);
+  */
+ int xhci_resume(struct xhci_hcd *xhci, bool hibernated)
+ {
+-      u32                     command, temp = 0, status;
++      u32                     command, temp = 0;
+       struct usb_hcd          *hcd = xhci_to_hcd(xhci);
+       struct usb_hcd          *secondary_hcd;
+       int                     retval = 0;
+@@ -1069,8 +1104,7 @@ int xhci_resume(struct xhci_hcd *xhci, bool hibernated)
+  done:
+       if (retval == 0) {
+               /* Resume root hubs only when have pending events. */
+-              status = readl(&xhci->op_regs->status);
+-              if (status & STS_EINT) {
++              if (xhci_pending_portevent(xhci)) {
+                       usb_hcd_resume_root_hub(xhci->shared_hcd);
+                       usb_hcd_resume_root_hub(hcd);
+               }
+diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
+index 6dfc4867dbcf..9751c1373fbb 100644
+--- a/drivers/usb/host/xhci.h
++++ b/drivers/usb/host/xhci.h
+@@ -382,6 +382,10 @@ struct xhci_op_regs {
+ #define PORT_PLC      (1 << 22)
+ /* port configure error change - port failed to configure its link partner */
+ #define PORT_CEC      (1 << 23)
++#define PORT_CHANGE_MASK      (PORT_CSC | PORT_PEC | PORT_WRC | PORT_OCC | \
++                               PORT_RC | PORT_PLC | PORT_CEC)
++
++
+ /* Cold Attach Status - xHC can set this bit to report device attached during
+  * Sx state. Warm port reset should be perfomed to clear this bit and move port
+  * to connected state.
+diff --git a/drivers/vfio/pci/vfio_pci.c b/drivers/vfio/pci/vfio_pci.c
+index b423a309a6e0..125b58eff936 100644
+--- a/drivers/vfio/pci/vfio_pci.c
++++ b/drivers/vfio/pci/vfio_pci.c
+@@ -28,6 +28,7 @@
+ #include <linux/uaccess.h>
+ #include <linux/vfio.h>
+ #include <linux/vgaarb.h>
++#include <linux/nospec.h>
+ 
+ #include "vfio_pci_private.h"
+ 
+@@ -727,6 +728,9 @@ static long vfio_pci_ioctl(void *device_data,
+                       if (info.index >=
+                           VFIO_PCI_NUM_REGIONS + vdev->num_regions)
+                               return -EINVAL;
++                      info.index = array_index_nospec(info.index,
++                                                      VFIO_PCI_NUM_REGIONS +
++                                                      vdev->num_regions);
+ 
+                       i = info.index - VFIO_PCI_NUM_REGIONS;
+ 
+diff --git a/drivers/vfio/vfio_iommu_spapr_tce.c b/drivers/vfio/vfio_iommu_spapr_tce.c
+index 759a5bdd40e1..2da5f054257a 100644
+--- a/drivers/vfio/vfio_iommu_spapr_tce.c
++++ b/drivers/vfio/vfio_iommu_spapr_tce.c
+@@ -457,13 +457,13 @@ static void tce_iommu_unuse_page(struct tce_container *container,
+ }
+ 
+ static int tce_iommu_prereg_ua_to_hpa(struct tce_container *container,
+-              unsigned long tce, unsigned long size,
++              unsigned long tce, unsigned long shift,
+               unsigned long *phpa, struct mm_iommu_table_group_mem_t **pmem)
+ {
+       long ret = 0;
+       struct mm_iommu_table_group_mem_t *mem;
+ 
+-      mem = mm_iommu_lookup(container->mm, tce, size);
++      mem = mm_iommu_lookup(container->mm, tce, 1ULL << shift);
+       if (!mem)
+               return -EINVAL;
+ 
+@@ -487,7 +487,7 @@ static void tce_iommu_unuse_page_v2(struct tce_container *container,
+       if (!pua)
+               return;
+ 
+-      ret = tce_iommu_prereg_ua_to_hpa(container, *pua, IOMMU_PAGE_SIZE(tbl),
++      ret = tce_iommu_prereg_ua_to_hpa(container, *pua, tbl->it_page_shift,
+                       &hpa, &mem);
+       if (ret)
+               pr_debug("%s: tce %lx at #%lx was not cached, ret=%d\n",
+@@ -611,7 +611,7 @@ static long tce_iommu_build_v2(struct tce_container *container,
+                               entry + i);
+ 
+               ret = tce_iommu_prereg_ua_to_hpa(container,
+-                              tce, IOMMU_PAGE_SIZE(tbl), &hpa, &mem);
++                              tce, tbl->it_page_shift, &hpa, &mem);
+               if (ret)
+                       break;
+ 
+diff --git a/fs/fat/inode.c b/fs/fat/inode.c
+index ffbbf0520d9e..6aa49dcaa938 100644
+--- a/fs/fat/inode.c
++++ b/fs/fat/inode.c
+@@ -697,13 +697,21 @@ static void fat_set_state(struct super_block *sb,
+       brelse(bh);
+ }
+ 
++static void fat_reset_iocharset(struct fat_mount_options *opts)
++{
++      if (opts->iocharset != fat_default_iocharset) {
++              /* Note: opts->iocharset can be NULL here */
++              kfree(opts->iocharset);
++              opts->iocharset = fat_default_iocharset;
++      }
++}
++
+ static void delayed_free(struct rcu_head *p)
+ {
+       struct msdos_sb_info *sbi = container_of(p, struct msdos_sb_info, rcu);
+       unload_nls(sbi->nls_disk);
+       unload_nls(sbi->nls_io);
+-      if (sbi->options.iocharset != fat_default_iocharset)
+-              kfree(sbi->options.iocharset);
++      fat_reset_iocharset(&sbi->options);
+       kfree(sbi);
+ }
+ 
+@@ -1118,7 +1126,7 @@ static int parse_options(struct super_block *sb, char *options, int is_vfat,
+       opts->fs_fmask = opts->fs_dmask = current_umask();
+       opts->allow_utime = -1;
+       opts->codepage = fat_default_codepage;
+-      opts->iocharset = fat_default_iocharset;
++      fat_reset_iocharset(opts);
+       if (is_vfat) {
+               opts->shortname = VFAT_SFN_DISPLAY_WINNT|VFAT_SFN_CREATE_WIN95;
+               opts->rodir = 0;
+@@ -1275,8 +1283,7 @@ static int parse_options(struct super_block *sb, char *options, int is_vfat,
+ 
+               /* vfat specific */
+               case Opt_charset:
+-                      if (opts->iocharset != fat_default_iocharset)
+-                              kfree(opts->iocharset);
++                      fat_reset_iocharset(opts);
+                       iocharset = match_strdup(&args[0]);
+                       if (!iocharset)
+                               return -ENOMEM;
+@@ -1867,8 +1874,7 @@ int fat_fill_super(struct super_block *sb, void *data, int silent, int isvfat,
+               iput(fat_inode);
+       unload_nls(sbi->nls_io);
+       unload_nls(sbi->nls_disk);
+-      if (sbi->options.iocharset != fat_default_iocharset)
+-              kfree(sbi->options.iocharset);
++      fat_reset_iocharset(&sbi->options);
+       sb->s_fs_info = NULL;
+       kfree(sbi);
+       return error;
+diff --git a/fs/internal.h b/fs/internal.h
+index 980d005b21b4..5645b4ebf494 100644
+--- a/fs/internal.h
++++ b/fs/internal.h
+@@ -127,7 +127,6 @@ int do_fchownat(int dfd, const char __user *filename, uid_t user, gid_t group,
+ 
+ extern int open_check_o_direct(struct file *f);
+ extern int vfs_open(const struct path *, struct file *, const struct cred *);
+-extern struct file *filp_clone_open(struct file *);
+ 
+ /*
+  * inode.c
+diff --git a/include/linux/fs.h b/include/linux/fs.h
+index 760d8da1b6c7..81fe0292a7ac 100644
+--- a/include/linux/fs.h
++++ b/include/linux/fs.h
+@@ -2401,6 +2401,7 @@ extern struct file *filp_open(const char *, int, umode_t);
+ extern struct file *file_open_root(struct dentry *, struct vfsmount *,
+                                  const char *, int, umode_t);
+ extern struct file * dentry_open(const struct path *, int, const struct cred *);
++extern struct file *filp_clone_open(struct file *);
+ extern int filp_close(struct file *, fl_owner_t id);
+ 
+ extern struct filename *getname_flags(const char __user *, int, int *);
+diff --git a/include/linux/sched/task.h b/include/linux/sched/task.h
+index 5be31eb7b266..108ede99e533 100644
+--- a/include/linux/sched/task.h
++++ b/include/linux/sched/task.h
+@@ -75,7 +75,7 @@ extern long _do_fork(unsigned long, unsigned long, unsigned long, int __user *,
+ extern long do_fork(unsigned long, unsigned long, unsigned long, int __user *, int __user *);
+ struct task_struct *fork_idle(int);
+ extern pid_t kernel_thread(int (*fn)(void *), void *arg, unsigned long flags);
+-extern long kernel_wait4(pid_t, int *, int, struct rusage *);
++extern long kernel_wait4(pid_t, int __user *, int, struct rusage *);
+ 
+ extern void free_task(struct task_struct *tsk);
+ 
+diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
+index 9065477ed255..15d8f9c84ca5 100644
+--- a/include/linux/skbuff.h
++++ b/include/linux/skbuff.h
+@@ -628,6 +628,7 @@ typedef unsigned char *sk_buff_data_t;
+  *    @hash: the packet hash
+  *    @queue_mapping: Queue mapping for multiqueue devices
+  *    @xmit_more: More SKBs are pending for this queue
++ *    @pfmemalloc: skbuff was allocated from PFMEMALLOC reserves
+  *    @ndisc_nodetype: router type (from link layer)
+  *    @ooo_okay: allow the mapping of a socket to a queue to be changed
+  *    @l4_hash: indicate hash is a canonical 4-tuple hash over transport
+@@ -733,7 +734,7 @@ struct sk_buff {
+                               peeked:1,
+                               head_frag:1,
+                               xmit_more:1,
+-                              __unused:1; /* one bit hole */
++                              pfmemalloc:1;
+ 
+       /* fields enclosed in headers_start/headers_end are copied
+        * using a single memcpy() in __copy_skb_header()
+@@ -752,31 +753,30 @@ struct sk_buff {
+ 
+       __u8                    __pkt_type_offset[0];
+       __u8                    pkt_type:3;
+-      __u8                    pfmemalloc:1;
+       __u8                    ignore_df:1;
+-
+       __u8                    nf_trace:1;
+       __u8                    ip_summed:2;
+       __u8                    ooo_okay:1;
++
+       __u8                    l4_hash:1;
+       __u8                    sw_hash:1;
+       __u8                    wifi_acked_valid:1;
+       __u8                    wifi_acked:1;
+-
+       __u8                    no_fcs:1;
+       /* Indicates the inner headers are valid in the skbuff. */
+       __u8                    encapsulation:1;
+       __u8                    encap_hdr_csum:1;
+       __u8                    csum_valid:1;
++
+       __u8                    csum_complete_sw:1;
+       __u8                    csum_level:2;
+       __u8                    csum_not_inet:1;
+-
+       __u8                    dst_pending_confirm:1;
+ #ifdef CONFIG_IPV6_NDISC_NODETYPE
+       __u8                    ndisc_nodetype:2;
+ #endif
+       __u8                    ipvs_property:1;
++
+       __u8                    inner_protocol_type:1;
+       __u8                    remcsum_offload:1;
+ #ifdef CONFIG_NET_SWITCHDEV
+diff --git a/include/net/ipv6.h b/include/net/ipv6.h
+index a406f2e8680a..aeebbbb9e0bd 100644
+--- a/include/net/ipv6.h
++++ b/include/net/ipv6.h
+@@ -829,7 +829,7 @@ static inline __be32 ip6_make_flowlabel(struct net *net, struct sk_buff *skb,
+        * to minimize possbility that any useful information to an
+        * attacker is leaked. Only lower 20 bits are relevant.
+        */
+-      rol32(hash, 16);
++      hash = rol32(hash, 16);
+ 
+       flowlabel = (__force __be32)hash & IPV6_FLOWLABEL_MASK;
+ 
+diff --git a/include/net/sctp/sctp.h b/include/net/sctp/sctp.h
+index 35498e613ff5..edfa9d0f6005 100644
+--- a/include/net/sctp/sctp.h
++++ b/include/net/sctp/sctp.h
+@@ -609,10 +609,15 @@ static inline struct dst_entry *sctp_transport_dst_check(struct sctp_transport *
+       return t->dst;
+ }
+ 
++static inline __u32 sctp_dst_mtu(const struct dst_entry *dst)
++{
++      return SCTP_TRUNC4(max_t(__u32, dst_mtu(dst),
++                               SCTP_DEFAULT_MINSEGMENT));
++}
++
+ static inline bool sctp_transport_pmtu_check(struct sctp_transport *t)
+ {
+-      __u32 pmtu = max_t(size_t, SCTP_TRUNC4(dst_mtu(t->dst)),
+-                         SCTP_DEFAULT_MINSEGMENT);
++      __u32 pmtu = sctp_dst_mtu(t->dst);
+ 
+       if (t->pathmtu == pmtu)
+               return true;
+diff --git a/kernel/stop_machine.c b/kernel/stop_machine.c
+index 64c0291b579c..2f6fa95de2d8 100644
+--- a/kernel/stop_machine.c
++++ b/kernel/stop_machine.c
+@@ -270,7 +270,11 @@ static int cpu_stop_queue_two_works(int cpu1, struct cpu_stop_work *work1,
+               goto retry;
+       }
+ 
+-      wake_up_q(&wakeq);
++      if (!err) {
++              preempt_disable();
++              wake_up_q(&wakeq);
++              preempt_enable();
++      }
+ 
+       return err;
+ }
+diff --git a/lib/rhashtable.c b/lib/rhashtable.c
+index 2b2b79974b61..240a8b864d5b 100644
+--- a/lib/rhashtable.c
++++ b/lib/rhashtable.c
+@@ -923,8 +923,16 @@ EXPORT_SYMBOL_GPL(rhashtable_walk_stop);
+ 
+ static size_t rounded_hashtable_size(const struct rhashtable_params *params)
+ {
+-      return max(roundup_pow_of_two(params->nelem_hint * 4 / 3),
+-                 (unsigned long)params->min_size);
++      size_t retsize;
++
++      if (params->nelem_hint)
++              retsize = max(roundup_pow_of_two(params->nelem_hint * 4 / 3),
++                            (unsigned long)params->min_size);
++      else
++              retsize = max(HASH_DEFAULT_SIZE,
++                            (unsigned long)params->min_size);
++
++      return retsize;
+ }
+ 
+ static u32 rhashtable_jhash2(const void *key, u32 length, u32 seed)
+@@ -981,8 +989,6 @@ int rhashtable_init(struct rhashtable *ht,
+       struct bucket_table *tbl;
+       size_t size;
+ 
+-      size = HASH_DEFAULT_SIZE;
+-
+       if ((!params->key_len && !params->obj_hashfn) ||
+           (params->obj_hashfn && !params->obj_cmpfn))
+               return -EINVAL;
+@@ -1009,8 +1015,7 @@ int rhashtable_init(struct rhashtable *ht,
+ 
+       ht->p.min_size = max_t(u16, ht->p.min_size, HASH_MIN_SIZE);
+ 
+-      if (params->nelem_hint)
+-              size = rounded_hashtable_size(&ht->p);
++      size = rounded_hashtable_size(&ht->p);
+ 
+       if (params->locks_mul)
+               ht->p.locks_mul = roundup_pow_of_two(params->locks_mul);
+@@ -1102,13 +1107,14 @@ void rhashtable_free_and_destroy(struct rhashtable *ht,
+                                void (*free_fn)(void *ptr, void *arg),
+                                void *arg)
+ {
+-      struct bucket_table *tbl;
++      struct bucket_table *tbl, *next_tbl;
+       unsigned int i;
+ 
+       cancel_work_sync(&ht->run_work);
+ 
+       mutex_lock(&ht->mutex);
+       tbl = rht_dereference(ht->tbl, ht);
++restart:
+       if (free_fn) {
+               for (i = 0; i < tbl->size; i++) {
+                       struct rhash_head *pos, *next;
+@@ -1125,7 +1131,12 @@ void rhashtable_free_and_destroy(struct rhashtable *ht,
+               }
+       }
+ 
++      next_tbl = rht_dereference(tbl->future_tbl, ht);
+       bucket_table_free(tbl);
++      if (next_tbl) {
++              tbl = next_tbl;
++              goto restart;
++      }
+       mutex_unlock(&ht->mutex);
+ }
+ EXPORT_SYMBOL_GPL(rhashtable_free_and_destroy);
+diff --git a/mm/huge_memory.c b/mm/huge_memory.c
+index b9f3dbd885bd..327e12679dd5 100644
+--- a/mm/huge_memory.c
++++ b/mm/huge_memory.c
+@@ -2087,6 +2087,8 @@ static void __split_huge_pmd_locked(struct vm_area_struct *vma, pmd_t *pmd,
+               if (vma_is_dax(vma))
+                       return;
+               page = pmd_page(_pmd);
++              if (!PageDirty(page) && pmd_dirty(_pmd))
++                      set_page_dirty(page);
+               if (!PageReferenced(page) && pmd_young(_pmd))
+                       SetPageReferenced(page);
+               page_remove_rmap(page, true);
+diff --git a/mm/memcontrol.c b/mm/memcontrol.c
+index 2bd3df3d101a..95c0980a6f7e 100644
+--- a/mm/memcontrol.c
++++ b/mm/memcontrol.c
+@@ -850,7 +850,7 @@ static void invalidate_reclaim_iterators(struct mem_cgroup *dead_memcg)
+       int nid;
+       int i;
+ 
+-      while ((memcg = parent_mem_cgroup(memcg))) {
++      for (; memcg; memcg = parent_mem_cgroup(memcg)) {
+               for_each_node(nid) {
+                       mz = mem_cgroup_nodeinfo(memcg, nid);
+                       for (i = 0; i <= DEF_PRIORITY; i++) {
+diff --git a/net/core/gen_stats.c b/net/core/gen_stats.c
+index b2b2323bdc84..188d693cb251 100644
+--- a/net/core/gen_stats.c
++++ b/net/core/gen_stats.c
+@@ -77,8 +77,20 @@ gnet_stats_start_copy_compat(struct sk_buff *skb, int type, int tc_stats_type,
+               d->lock = lock;
+               spin_lock_bh(lock);
+       }
+-      if (d->tail)
+-              return gnet_stats_copy(d, type, NULL, 0, padattr);
++      if (d->tail) {
++              int ret = gnet_stats_copy(d, type, NULL, 0, padattr);
++
++              /* The initial attribute added in gnet_stats_copy() may be
++               * preceded by a padding attribute, in which case d->tail will
++               * end up pointing at the padding instead of the real attribute.
++               * Fix this so gnet_stats_finish_copy() adjusts the length of
++               * the right attribute.
++               */
++              if (ret == 0 && d->tail->nla_type == padattr)
++                      d->tail = (struct nlattr *)((char *)d->tail +
++                                                  NLA_ALIGN(d->tail->nla_len));
++              return ret;
++      }
+ 
+       return 0;
+ }
+diff --git a/net/core/skbuff.c b/net/core/skbuff.c
+index 345b51837ca8..a84d69c047ac 100644
+--- a/net/core/skbuff.c
++++ b/net/core/skbuff.c
+@@ -858,6 +858,7 @@ static struct sk_buff *__skb_clone(struct sk_buff *n, struct sk_buff *skb)
+       n->cloned = 1;
+       n->nohdr = 0;
+       n->peeked = 0;
++      C(pfmemalloc);
+       n->destructor = NULL;
+       C(tail);
+       C(end);
+diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c
+index e66172aaf241..511d6748ea5f 100644
+--- a/net/ipv4/fib_frontend.c
++++ b/net/ipv4/fib_frontend.c
+@@ -300,6 +300,7 @@ __be32 fib_compute_spec_dst(struct sk_buff *skb)
+       if (!ipv4_is_zeronet(ip_hdr(skb)->saddr)) {
+               struct flowi4 fl4 = {
+                       .flowi4_iif = LOOPBACK_IFINDEX,
++                      .flowi4_oif = l3mdev_master_ifindex_rcu(dev),
+                       .daddr = ip_hdr(skb)->saddr,
+                       .flowi4_tos = RT_TOS(ip_hdr(skb)->tos),
+                       .flowi4_scope = scope,
+diff --git a/net/ipv4/sysctl_net_ipv4.c b/net/ipv4/sysctl_net_ipv4.c
+index 2f600f261690..61e42a3390ba 100644
+--- a/net/ipv4/sysctl_net_ipv4.c
++++ b/net/ipv4/sysctl_net_ipv4.c
+@@ -187,8 +187,9 @@ static int ipv4_ping_group_range(struct ctl_table *table, int write,
+       if (write && ret == 0) {
+               low = make_kgid(user_ns, urange[0]);
+               high = make_kgid(user_ns, urange[1]);
+-              if (!gid_valid(low) || !gid_valid(high) ||
+-                  (urange[1] < urange[0]) || gid_lt(high, low)) {
++              if (!gid_valid(low) || !gid_valid(high))
++                      return -EINVAL;
++              if (urange[1] < urange[0] || gid_lt(high, low)) {
+                       low = make_kgid(&init_user_ns, 1);
+                       high = make_kgid(&init_user_ns, 0);
+               }
+diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
+index c9d00ef54dec..58e316cf6607 100644
+--- a/net/ipv4/tcp.c
++++ b/net/ipv4/tcp.c
+@@ -3524,8 +3524,7 @@ int tcp_abort(struct sock *sk, int err)
+                       struct request_sock *req = inet_reqsk(sk);
+ 
+                       local_bh_disable();
+-                      inet_csk_reqsk_queue_drop_and_put(req->rsk_listener,
+-                                                        req);
++                      inet_csk_reqsk_queue_drop(req->rsk_listener, req);
+                       local_bh_enable();
+                       return 0;
+               }
+diff --git a/net/ipv6/Kconfig b/net/ipv6/Kconfig
+index 11e4e80cf7e9..0efb914695ac 100644
+--- a/net/ipv6/Kconfig
++++ b/net/ipv6/Kconfig
+@@ -108,6 +108,7 @@ config IPV6_MIP6
+ config IPV6_ILA
+       tristate "IPv6: Identifier Locator Addressing (ILA)"
+       depends on NETFILTER
++      select DST_CACHE
+       select LWTUNNEL
+       ---help---
+         Support for IPv6 Identifier Locator Addressing (ILA).
+diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c
+index 458de353f5d9..1a4d6897d17f 100644
+--- a/net/ipv6/ip6_gre.c
++++ b/net/ipv6/ip6_gre.c
+@@ -927,7 +927,6 @@ static netdev_tx_t ip6gre_tunnel_xmit(struct sk_buff *skb,
+ static netdev_tx_t ip6erspan_tunnel_xmit(struct sk_buff *skb,
+                                        struct net_device *dev)
+ {
+-      struct ipv6hdr *ipv6h = ipv6_hdr(skb);
+       struct ip6_tnl *t = netdev_priv(dev);
+       struct dst_entry *dst = skb_dst(skb);
+       struct net_device_stats *stats;
+@@ -998,6 +997,8 @@ static netdev_tx_t ip6erspan_tunnel_xmit(struct sk_buff *skb,
+                       goto tx_err;
+               }
+       } else {
++              struct ipv6hdr *ipv6h = ipv6_hdr(skb);
++
+               switch (skb->protocol) {
+               case htons(ETH_P_IP):
+                       memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
+diff --git a/net/ipv6/ndisc.c b/net/ipv6/ndisc.c
+index 525051a886bc..3ff9316616d8 100644
+--- a/net/ipv6/ndisc.c
++++ b/net/ipv6/ndisc.c
+@@ -811,7 +811,7 @@ static void ndisc_recv_ns(struct sk_buff *skb)
+                       return;
+               }
+       }
+-      if (ndopts.nd_opts_nonce)
++      if (ndopts.nd_opts_nonce && ndopts.nd_opts_nonce->nd_opt_len == 1)
+               memcpy(&nonce, (u8 *)(ndopts.nd_opts_nonce + 1), 6);
+ 
+       inc = ipv6_addr_is_multicast(daddr);
+diff --git a/net/ipv6/route.c b/net/ipv6/route.c
+index b94345e657f7..3ed4de230830 100644
+--- a/net/ipv6/route.c
++++ b/net/ipv6/route.c
+@@ -4274,6 +4274,13 @@ static int ip6_route_multipath_add(struct fib6_config *cfg,
+                       err_nh = nh;
+                       goto add_errout;
+               }
++              if (!rt6_qualify_for_ecmp(rt)) {
++                      err = -EINVAL;
++                      NL_SET_ERR_MSG(extack,
++                                     "Device only routes can not be added for IPv6 using the multipath API.");
++                      dst_release_immediate(&rt->dst);
++                      goto cleanup;
++              }
+ 
+               /* Because each route is added like a single route we remove
+                * these flags after the first nexthop: if there is a collision,
+diff --git a/net/sched/sch_fq_codel.c b/net/sched/sch_fq_codel.c
+index 22fa13cf5d8b..846883907cd4 100644
+--- a/net/sched/sch_fq_codel.c
++++ b/net/sched/sch_fq_codel.c
+@@ -479,23 +479,27 @@ static int fq_codel_init(struct Qdisc *sch, struct nlattr *opt,
+       q->cparams.mtu = psched_mtu(qdisc_dev(sch));
+ 
+       if (opt) {
+-              int err = fq_codel_change(sch, opt, extack);
++              err = fq_codel_change(sch, opt, extack);
+               if (err)
+-                      return err;
++                      goto init_failure;
+       }
+ 
+       err = tcf_block_get(&q->block, &q->filter_list, sch, extack);
+       if (err)
+-              return err;
++              goto init_failure;
+ 
+       if (!q->flows) {
+               q->flows = kvzalloc(q->flows_cnt *
+                                          sizeof(struct fq_codel_flow), GFP_KERNEL);
+-              if (!q->flows)
+-                      return -ENOMEM;
++              if (!q->flows) {
++                      err = -ENOMEM;
++                      goto init_failure;
++              }
+               q->backlogs = kvzalloc(q->flows_cnt * sizeof(u32), GFP_KERNEL);
+-              if (!q->backlogs)
+-                      return -ENOMEM;
++              if (!q->backlogs) {
++                      err = -ENOMEM;
++                      goto alloc_failure;
++              }
+               for (i = 0; i < q->flows_cnt; i++) {
+                       struct fq_codel_flow *flow = q->flows + i;
+ 
+@@ -508,6 +512,13 @@ static int fq_codel_init(struct Qdisc *sch, struct nlattr *opt,
+       else
+               sch->flags &= ~TCQ_F_CAN_BYPASS;
+       return 0;
++
++alloc_failure:
++      kvfree(q->flows);
++      q->flows = NULL;
++init_failure:
++      q->flows_cnt = 0;
++      return err;
+ }
+ 
+ static int fq_codel_dump(struct Qdisc *sch, struct sk_buff *skb)
+diff --git a/net/sctp/associola.c b/net/sctp/associola.c
+index a47179da24e6..ef8adac1be83 100644
+--- a/net/sctp/associola.c
++++ b/net/sctp/associola.c
+@@ -1446,11 +1446,9 @@ void sctp_assoc_sync_pmtu(struct sctp_association *asoc)
+               return;
+ 
+       /* Get the lowest pmtu of all the transports. */
+-      list_for_each_entry(t, &asoc->peer.transport_addr_list,
+-                              transports) {
++      list_for_each_entry(t, &asoc->peer.transport_addr_list, transports) {
+               if (t->pmtu_pending && t->dst) {
+-                      sctp_transport_update_pmtu(
+-                                      t, SCTP_TRUNC4(dst_mtu(t->dst)));
++                      sctp_transport_update_pmtu(t, sctp_dst_mtu(t->dst));
+                       t->pmtu_pending = 0;
+               }
+               if (!pmtu || (t->pathmtu < pmtu))
+diff --git a/net/sctp/transport.c b/net/sctp/transport.c
+index 03fc2c427aca..e890ceb55939 100644
+--- a/net/sctp/transport.c
++++ b/net/sctp/transport.c
+@@ -242,9 +242,9 @@ void sctp_transport_pmtu(struct sctp_transport *transport, struct sock *sk)
+                                               &transport->fl, sk);
+       }
+ 
+-      if (transport->dst) {
+-              transport->pathmtu = SCTP_TRUNC4(dst_mtu(transport->dst));
+-      } else
++      if (transport->dst)
++              transport->pathmtu = sctp_dst_mtu(transport->dst);
++      else
+               transport->pathmtu = SCTP_DEFAULT_MAXSEGMENT;
+ }
+ 
+@@ -273,7 +273,7 @@ bool sctp_transport_update_pmtu(struct sctp_transport *t, u32 pmtu)
+ 
+       if (dst) {
+               /* Re-fetch, as under layers may have a higher minimum size */
+-              pmtu = SCTP_TRUNC4(dst_mtu(dst));
++              pmtu = sctp_dst_mtu(dst);
+               change = t->pathmtu != pmtu;
+       }
+       t->pathmtu = pmtu;
+diff --git a/sound/core/rawmidi.c b/sound/core/rawmidi.c
+index 69616d00481c..b53026a72e73 100644
+--- a/sound/core/rawmidi.c
++++ b/sound/core/rawmidi.c
+@@ -635,7 +635,7 @@ static int snd_rawmidi_info_select_user(struct snd_card *card,
+ int snd_rawmidi_output_params(struct snd_rawmidi_substream *substream,
+                             struct snd_rawmidi_params * params)
+ {
+-      char *newbuf;
++      char *newbuf, *oldbuf;
+       struct snd_rawmidi_runtime *runtime = substream->runtime;
+       
+       if (substream->append && substream->use_count > 1)
+@@ -648,13 +648,17 @@ int snd_rawmidi_output_params(struct snd_rawmidi_substream *substream,
+               return -EINVAL;
+       }
+       if (params->buffer_size != runtime->buffer_size) {
+-              newbuf = krealloc(runtime->buffer, params->buffer_size,
+-                                GFP_KERNEL);
++              newbuf = kmalloc(params->buffer_size, GFP_KERNEL);
+               if (!newbuf)
+                       return -ENOMEM;
++              spin_lock_irq(&runtime->lock);
++              oldbuf = runtime->buffer;
+               runtime->buffer = newbuf;
+               runtime->buffer_size = params->buffer_size;
+               runtime->avail = runtime->buffer_size;
++              runtime->appl_ptr = runtime->hw_ptr = 0;
++              spin_unlock_irq(&runtime->lock);
++              kfree(oldbuf);
+       }
+       runtime->avail_min = params->avail_min;
+       substream->active_sensing = !params->no_active_sensing;
+@@ -665,7 +669,7 @@ EXPORT_SYMBOL(snd_rawmidi_output_params);
+ int snd_rawmidi_input_params(struct snd_rawmidi_substream *substream,
+                            struct snd_rawmidi_params * params)
+ {
+-      char *newbuf;
++      char *newbuf, *oldbuf;
+       struct snd_rawmidi_runtime *runtime = substream->runtime;
+ 
+       snd_rawmidi_drain_input(substream);
+@@ -676,12 +680,16 @@ int snd_rawmidi_input_params(struct snd_rawmidi_substream *substream,
+               return -EINVAL;
+       }
+       if (params->buffer_size != runtime->buffer_size) {
+-              newbuf = krealloc(runtime->buffer, params->buffer_size,
+-                                GFP_KERNEL);
++              newbuf = kmalloc(params->buffer_size, GFP_KERNEL);
+               if (!newbuf)
+                       return -ENOMEM;
++              spin_lock_irq(&runtime->lock);
++              oldbuf = runtime->buffer;
+               runtime->buffer = newbuf;
+               runtime->buffer_size = params->buffer_size;
++              runtime->appl_ptr = runtime->hw_ptr = 0;
++              spin_unlock_irq(&runtime->lock);
++              kfree(oldbuf);
+       }
+       runtime->avail_min = params->avail_min;
+       return 0;
+diff --git a/sound/pci/hda/patch_conexant.c b/sound/pci/hda/patch_conexant.c
+index ba9a7e552183..88ce2f1022e1 100644
+--- a/sound/pci/hda/patch_conexant.c
++++ b/sound/pci/hda/patch_conexant.c
+@@ -965,6 +965,7 @@ static const struct snd_pci_quirk cxt5066_fixups[] = {
+       SND_PCI_QUIRK(0x103c, 0x8115, "HP Z1 Gen3", CXT_FIXUP_HP_GATE_MIC),
+       SND_PCI_QUIRK(0x103c, 0x814f, "HP ZBook 15u G3", CXT_FIXUP_MUTE_LED_GPIO),
+       SND_PCI_QUIRK(0x103c, 0x822e, "HP ProBook 440 G4", CXT_FIXUP_MUTE_LED_GPIO),
++      SND_PCI_QUIRK(0x103c, 0x836e, "HP ProBook 455 G5", CXT_FIXUP_MUTE_LED_GPIO),
+       SND_PCI_QUIRK(0x103c, 0x8299, "HP 800 G3 SFF", CXT_FIXUP_HP_MIC_NO_PRESENCE),
+       SND_PCI_QUIRK(0x103c, 0x829a, "HP 800 G3 DM", CXT_FIXUP_HP_MIC_NO_PRESENCE),
+       SND_PCI_QUIRK(0x103c, 0x8455, "HP Z2 G4", CXT_FIXUP_HP_MIC_NO_PRESENCE),
+diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
+index 066efe783fe8..7bba415cb850 100644
+--- a/sound/pci/hda/patch_realtek.c
++++ b/sound/pci/hda/patch_realtek.c
+@@ -2363,6 +2363,7 @@ static const struct snd_pci_quirk alc882_fixup_tbl[] = {
+       SND_PCI_QUIRK_VENDOR(0x1462, "MSI", ALC882_FIXUP_GPIO3),
+       SND_PCI_QUIRK(0x147b, 0x107a, "Abit AW9D-MAX", ALC882_FIXUP_ABIT_AW9D_MAX),
+       SND_PCI_QUIRK(0x1558, 0x9501, "Clevo P950HR", ALC1220_FIXUP_CLEVO_P950),
++      SND_PCI_QUIRK(0x1558, 0x95e1, "Clevo P95xER", ALC1220_FIXUP_CLEVO_P950),
+       SND_PCI_QUIRK(0x1558, 0x95e2, "Clevo P950ER", ALC1220_FIXUP_CLEVO_P950),
+       SND_PCI_QUIRK_VENDOR(0x1558, "Clevo laptop", ALC882_FIXUP_EAPD),
+       SND_PCI_QUIRK(0x161f, 0x2054, "Medion laptop", ALC883_FIXUP_EAPD),
+@@ -6543,6 +6544,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+       SND_PCI_QUIRK(0x10cf, 0x1629, "Lifebook U7x7", ALC255_FIXUP_LIFEBOOK_U7x7_HEADSET_MIC),
+       SND_PCI_QUIRK(0x10cf, 0x1845, "Lifebook U904", ALC269_FIXUP_LIFEBOOK_EXTMIC),
+       SND_PCI_QUIRK(0x10ec, 0x10f2, "Intel Reference board", ALC700_FIXUP_INTEL_REFERENCE),
++      SND_PCI_QUIRK(0x10f7, 0x8338, "Panasonic CF-SZ6", ALC269_FIXUP_HEADSET_MODE),
+       SND_PCI_QUIRK(0x144d, 0xc109, "Samsung Ativ book 9 (NP900X3G)", ALC269_FIXUP_INV_DMIC),
+       SND_PCI_QUIRK(0x144d, 0xc740, "Samsung Ativ book 8 (NP870Z5G)", ALC269_FIXUP_ATIV_BOOK_8),
+       SND_PCI_QUIRK(0x1458, 0xfa53, "Gigabyte BXBT-2807", ALC283_FIXUP_HEADSET_MIC),
+diff --git a/virt/kvm/eventfd.c b/virt/kvm/eventfd.c
+index 6e865e8b5b10..fe6eb0fe07f6 100644
+--- a/virt/kvm/eventfd.c
++++ b/virt/kvm/eventfd.c
+@@ -119,8 +119,12 @@ irqfd_shutdown(struct work_struct *work)
+ {
+       struct kvm_kernel_irqfd *irqfd =
+               container_of(work, struct kvm_kernel_irqfd, shutdown);
++      struct kvm *kvm = irqfd->kvm;
+       u64 cnt;
+ 
++      /* Make sure irqfd has been initalized in assign path. */
++      synchronize_srcu(&kvm->irq_srcu);
++
+       /*
+        * Synchronize with the wait-queue and unhook ourselves to prevent
+        * further events.
+@@ -387,7 +391,6 @@ kvm_irqfd_assign(struct kvm *kvm, struct kvm_irqfd *args)
+ 
+       idx = srcu_read_lock(&kvm->irq_srcu);
+       irqfd_update(kvm, irqfd);
+-      srcu_read_unlock(&kvm->irq_srcu, idx);
+ 
+       list_add_tail(&irqfd->list, &kvm->irqfds.items);
+ 
+@@ -402,11 +405,6 @@ kvm_irqfd_assign(struct kvm *kvm, struct kvm_irqfd *args)
+       if (events & EPOLLIN)
+               schedule_work(&irqfd->inject);
+ 
+-      /*
+-       * do not drop the file until the irqfd is fully initialized, otherwise
+-       * we might race against the EPOLLHUP
+-       */
+-      fdput(f);
+ #ifdef CONFIG_HAVE_KVM_IRQ_BYPASS
+       if (kvm_arch_has_irq_bypass()) {
+               irqfd->consumer.token = (void *)irqfd->eventfd;
+@@ -421,6 +419,13 @@ kvm_irqfd_assign(struct kvm *kvm, struct kvm_irqfd *args)
+       }
+ #endif
+ 
++      srcu_read_unlock(&kvm->irq_srcu, idx);
++
++      /*
++       * do not drop the file until the irqfd is fully initialized, otherwise
++       * we might race against the EPOLLHUP
++       */
++      fdput(f);
+       return 0;
+ 
+ fail:
