commit:     819b0cffa158a73b6276046ea0cb831c15ae8314
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Tue Feb  1 17:23:02 2022 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Tue Feb  1 17:23:02 2022 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=819b0cff

Linux patch 5.10.96

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

 0000_README              |    4 +
 1095_linux-5.10.96.patch | 3881 ++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 3885 insertions(+)

diff --git a/0000_README b/0000_README
index 5f3cbb9a..cc530626 100644
--- a/0000_README
+++ b/0000_README
@@ -423,6 +423,10 @@ Patch:  1094_linux-5.10.95.patch
 From:   http://www.kernel.org
 Desc:   Linux 5.10.95
 
+Patch:  1095_linux-5.10.96.patch
+From:   http://www.kernel.org
+Desc:   Linux 5.10.96
+
 Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.

diff --git a/1095_linux-5.10.96.patch b/1095_linux-5.10.96.patch
new file mode 100644
index 00000000..6d0571ac
--- /dev/null
+++ b/1095_linux-5.10.96.patch
@@ -0,0 +1,3881 @@
+diff --git a/Documentation/devicetree/bindings/net/can/tcan4x5x.txt b/Documentation/devicetree/bindings/net/can/tcan4x5x.txt
+index 0968b40aef1e8..e3501bfa22e90 100644
+--- a/Documentation/devicetree/bindings/net/can/tcan4x5x.txt
++++ b/Documentation/devicetree/bindings/net/can/tcan4x5x.txt
+@@ -31,7 +31,7 @@ tcan4x5x: tcan4x5x@0 {
+               #address-cells = <1>;
+               #size-cells = <1>;
+               spi-max-frequency = <10000000>;
+-              bosch,mram-cfg = <0x0 0 0 32 0 0 1 1>;
++              bosch,mram-cfg = <0x0 0 0 16 0 0 1 1>;
+               interrupt-parent = <&gpio1>;
+               interrupts = <14 IRQ_TYPE_LEVEL_LOW>;
+               device-state-gpios = <&gpio3 21 GPIO_ACTIVE_HIGH>;
+diff --git a/Makefile b/Makefile
+index fa98893aae615..c43133c8a5b1f 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 5
+ PATCHLEVEL = 10
+-SUBLEVEL = 95
++SUBLEVEL = 96
+ EXTRAVERSION =
+ NAME = Dare mighty things
+ 
+diff --git a/arch/arm64/kernel/process.c b/arch/arm64/kernel/process.c
+index 4999caff32818..22275d8518eb3 100644
+--- a/arch/arm64/kernel/process.c
++++ b/arch/arm64/kernel/process.c
+@@ -511,34 +511,26 @@ static void entry_task_switch(struct task_struct *next)
+ 
+ /*
+  * ARM erratum 1418040 handling, affecting the 32bit view of CNTVCT.
+- * Assuming the virtual counter is enabled at the beginning of times:
+- *
+- * - disable access when switching from a 64bit task to a 32bit task
+- * - enable access when switching from a 32bit task to a 64bit task
++ * Ensure access is disabled when switching to a 32bit task, ensure
++ * access is enabled when switching to a 64bit task.
+  */
+-static void erratum_1418040_thread_switch(struct task_struct *prev,
+-                                        struct task_struct *next)
++static void erratum_1418040_thread_switch(struct task_struct *next)
+ {
+-      bool prev32, next32;
+-      u64 val;
+-
+-      if (!IS_ENABLED(CONFIG_ARM64_ERRATUM_1418040))
+-              return;
+-
+-      prev32 = is_compat_thread(task_thread_info(prev));
+-      next32 = is_compat_thread(task_thread_info(next));
+-
+-      if (prev32 == next32 || !this_cpu_has_cap(ARM64_WORKAROUND_1418040))
++      if (!IS_ENABLED(CONFIG_ARM64_ERRATUM_1418040) ||
++          !this_cpu_has_cap(ARM64_WORKAROUND_1418040))
+               return;
+ 
+-      val = read_sysreg(cntkctl_el1);
+-
+-      if (!next32)
+-              val |= ARCH_TIMER_USR_VCT_ACCESS_EN;
++      if (is_compat_thread(task_thread_info(next)))
++              sysreg_clear_set(cntkctl_el1, ARCH_TIMER_USR_VCT_ACCESS_EN, 0);
+       else
+-              val &= ~ARCH_TIMER_USR_VCT_ACCESS_EN;
++              sysreg_clear_set(cntkctl_el1, 0, ARCH_TIMER_USR_VCT_ACCESS_EN);
++}
+ 
+-      write_sysreg(val, cntkctl_el1);
++static void erratum_1418040_new_exec(void)
++{
++      preempt_disable();
++      erratum_1418040_thread_switch(current);
++      preempt_enable();
+ }
+ 
+ /*
+@@ -556,7 +548,7 @@ __notrace_funcgraph struct task_struct *__switch_to(struct task_struct *prev,
+       entry_task_switch(next);
+       uao_thread_switch(next);
+       ssbs_thread_switch(next);
+-      erratum_1418040_thread_switch(prev, next);
++      erratum_1418040_thread_switch(next);
+ 
+       /*
+        * Complete any pending TLB or cache maintenance on this CPU in case
+@@ -622,6 +614,7 @@ void arch_setup_new_exec(void)
+       current->mm->context.flags = is_compat_task() ? MMCF_AARCH32 : 0;
+ 
+       ptrauth_thread_init_user(current);
++      erratum_1418040_new_exec();
+ 
+       if (task_spec_ssb_noexec(current)) {
+               arch_prctl_spec_ctrl_set(current, PR_SPEC_STORE_BYPASS,
+diff --git a/arch/powerpc/include/asm/book3s/32/mmu-hash.h b/arch/powerpc/include/asm/book3s/32/mmu-hash.h
+index a8982d52f6b1d..cbde06d0fb380 100644
+--- a/arch/powerpc/include/asm/book3s/32/mmu-hash.h
++++ b/arch/powerpc/include/asm/book3s/32/mmu-hash.h
+@@ -102,6 +102,8 @@ extern s32 patch__hash_page_B, patch__hash_page_C;
+ extern s32 patch__flush_hash_A0, patch__flush_hash_A1, patch__flush_hash_A2;
+ extern s32 patch__flush_hash_B;
+ 
++int __init find_free_bat(void);
++unsigned int bat_block_size(unsigned long base, unsigned long top);
+ #endif /* !__ASSEMBLY__ */
+ 
+ /* We happily ignore the smaller BATs on 601, we don't actually use
+diff --git a/arch/powerpc/include/asm/ppc-opcode.h b/arch/powerpc/include/asm/ppc-opcode.h
+index a6e3700c4566a..f0c0816f57270 100644
+--- a/arch/powerpc/include/asm/ppc-opcode.h
++++ b/arch/powerpc/include/asm/ppc-opcode.h
+@@ -449,6 +449,7 @@
+ #define PPC_RAW_LDX(r, base, b)               (0x7c00002a | ___PPC_RT(r) | ___PPC_RA(base) | ___PPC_RB(b))
+ #define PPC_RAW_LHZ(r, base, i)               (0xa0000000 | ___PPC_RT(r) | ___PPC_RA(base) | IMM_L(i))
+ #define PPC_RAW_LHBRX(r, base, b)     (0x7c00062c | ___PPC_RT(r) | ___PPC_RA(base) | ___PPC_RB(b))
++#define PPC_RAW_LWBRX(r, base, b)     (0x7c00042c | ___PPC_RT(r) | ___PPC_RA(base) | ___PPC_RB(b))
+ #define PPC_RAW_LDBRX(r, base, b)     (0x7c000428 | ___PPC_RT(r) | ___PPC_RA(base) | ___PPC_RB(b))
+ #define PPC_RAW_STWCX(s, a, b)                (0x7c00012d | ___PPC_RS(s) | ___PPC_RA(a) | ___PPC_RB(b))
+ #define PPC_RAW_CMPWI(a, i)           (0x2c000000 | ___PPC_RA(a) | IMM_L(i))
+diff --git a/arch/powerpc/kernel/Makefile b/arch/powerpc/kernel/Makefile
+index fe2ef598e2ead..376104c166fcf 100644
+--- a/arch/powerpc/kernel/Makefile
++++ b/arch/powerpc/kernel/Makefile
+@@ -11,6 +11,7 @@ CFLAGS_prom_init.o      += -fPIC
+ CFLAGS_btext.o                += -fPIC
+ endif
+ 
++CFLAGS_early_32.o += $(DISABLE_LATENT_ENTROPY_PLUGIN)
+ CFLAGS_cputable.o += $(DISABLE_LATENT_ENTROPY_PLUGIN)
+ CFLAGS_prom_init.o += $(DISABLE_LATENT_ENTROPY_PLUGIN)
+ CFLAGS_btext.o += $(DISABLE_LATENT_ENTROPY_PLUGIN)
+diff --git a/arch/powerpc/lib/Makefile b/arch/powerpc/lib/Makefile
+index 58991233381ed..0697a0e014ae8 100644
+--- a/arch/powerpc/lib/Makefile
++++ b/arch/powerpc/lib/Makefile
+@@ -19,6 +19,9 @@ CFLAGS_code-patching.o += -DDISABLE_BRANCH_PROFILING
+ CFLAGS_feature-fixups.o += -DDISABLE_BRANCH_PROFILING
+ endif
+ 
++CFLAGS_code-patching.o += $(DISABLE_LATENT_ENTROPY_PLUGIN)
++CFLAGS_feature-fixups.o += $(DISABLE_LATENT_ENTROPY_PLUGIN)
++
+ obj-y += alloc.o code-patching.o feature-fixups.o pmem.o inst.o test_code-patching.o
+ 
+ ifndef CONFIG_KASAN
+diff --git a/arch/powerpc/mm/book3s32/mmu.c b/arch/powerpc/mm/book3s32/mmu.c
+index a59e7ec981803..602ab13127b40 100644
+--- a/arch/powerpc/mm/book3s32/mmu.c
++++ b/arch/powerpc/mm/book3s32/mmu.c
+@@ -72,7 +72,7 @@ unsigned long p_block_mapped(phys_addr_t pa)
+       return 0;
+ }
+ 
+-static int find_free_bat(void)
++int __init find_free_bat(void)
+ {
+       int b;
+       int n = mmu_has_feature(MMU_FTR_USE_HIGH_BATS) ? 8 : 4;
+@@ -96,7 +96,7 @@ static int find_free_bat(void)
+  * - block size has to be a power of two. This is calculated by finding the
+  *   highest bit set to 1.
+  */
+-static unsigned int block_size(unsigned long base, unsigned long top)
++unsigned int bat_block_size(unsigned long base, unsigned long top)
+ {
+       unsigned int max_size = SZ_256M;
+       unsigned int base_shift = (ffs(base) - 1) & 31;
+@@ -141,7 +141,7 @@ static unsigned long __init __mmu_mapin_ram(unsigned long base, unsigned long to
+       int idx;
+ 
+       while ((idx = find_free_bat()) != -1 && base != top) {
+-              unsigned int size = block_size(base, top);
++              unsigned int size = bat_block_size(base, top);
+ 
+               if (size < 128 << 10)
+                       break;
+@@ -201,18 +201,17 @@ void mmu_mark_initmem_nx(void)
+       int nb = mmu_has_feature(MMU_FTR_USE_HIGH_BATS) ? 8 : 4;
+       int i;
+       unsigned long base = (unsigned long)_stext - PAGE_OFFSET;
+-      unsigned long top = (unsigned long)_etext - PAGE_OFFSET;
++      unsigned long top = ALIGN((unsigned long)_etext - PAGE_OFFSET, SZ_128K);
+       unsigned long border = (unsigned long)__init_begin - PAGE_OFFSET;
+       unsigned long size;
+ 
+-      for (i = 0; i < nb - 1 && base < top && top - base > (128 << 10);) {
+-              size = block_size(base, top);
++      for (i = 0; i < nb - 1 && base < top;) {
++              size = bat_block_size(base, top);
+               setibat(i++, PAGE_OFFSET + base, base, size, PAGE_KERNEL_TEXT);
+               base += size;
+       }
+       if (base < top) {
+-              size = block_size(base, top);
+-              size = max(size, 128UL << 10);
++              size = bat_block_size(base, top);
+               if ((top - base) > size) {
+                       size <<= 1;
+                       if (strict_kernel_rwx_enabled() && base + size > border)
+diff --git a/arch/powerpc/mm/kasan/book3s_32.c b/arch/powerpc/mm/kasan/book3s_32.c
+index 35b287b0a8da4..450a67ef0bbe1 100644
+--- a/arch/powerpc/mm/kasan/book3s_32.c
++++ b/arch/powerpc/mm/kasan/book3s_32.c
+@@ -10,48 +10,51 @@ int __init kasan_init_region(void *start, size_t size)
+ {
+       unsigned long k_start = (unsigned long)kasan_mem_to_shadow(start);
+       unsigned long k_end = (unsigned long)kasan_mem_to_shadow(start + size);
+-      unsigned long k_cur = k_start;
+-      int k_size = k_end - k_start;
+-      int k_size_base = 1 << (ffs(k_size) - 1);
++      unsigned long k_nobat = k_start;
++      unsigned long k_cur;
++      phys_addr_t phys;
+       int ret;
+-      void *block;
+ 
+-      block = memblock_alloc(k_size, k_size_base);
+-
+-      if (block && k_size_base >= SZ_128K && k_start == ALIGN(k_start, k_size_base)) {
+-              int shift = ffs(k_size - k_size_base);
+-              int k_size_more = shift ? 1 << (shift - 1) : 0;
+-
+-              setbat(-1, k_start, __pa(block), k_size_base, PAGE_KERNEL);
+-              if (k_size_more >= SZ_128K)
+-              setbat(-1, k_start + k_size_base, __pa(block) + k_size_base,
+-                             k_size_more, PAGE_KERNEL);
+-              if (v_block_mapped(k_start))
+-                      k_cur = k_start + k_size_base;
+-              if (v_block_mapped(k_start + k_size_base))
+-                      k_cur = k_start + k_size_base + k_size_more;
+-
+-              update_bats();
++      while (k_nobat < k_end) {
++              unsigned int k_size = bat_block_size(k_nobat, k_end);
++              int idx = find_free_bat();
++
++              if (idx == -1)
++                      break;
++              if (k_size < SZ_128K)
++                      break;
++              phys = memblock_phys_alloc_range(k_size, k_size, 0,
++                                               MEMBLOCK_ALLOC_ANYWHERE);
++              if (!phys)
++                      break;
++
++              setbat(idx, k_nobat, phys, k_size, PAGE_KERNEL);
++              k_nobat += k_size;
+       }
++      if (k_nobat != k_start)
++              update_bats();
+ 
+-      if (!block)
+-              block = memblock_alloc(k_size, PAGE_SIZE);
+-      if (!block)
+-              return -ENOMEM;
++      if (k_nobat < k_end) {
++              phys = memblock_phys_alloc_range(k_end - k_nobat, PAGE_SIZE, 0,
++                                               MEMBLOCK_ALLOC_ANYWHERE);
++              if (!phys)
++                      return -ENOMEM;
++      }
+ 
+       ret = kasan_init_shadow_page_tables(k_start, k_end);
+       if (ret)
+               return ret;
+ 
+-      kasan_update_early_region(k_start, k_cur, __pte(0));
++      kasan_update_early_region(k_start, k_nobat, __pte(0));
+ 
+-      for (; k_cur < k_end; k_cur += PAGE_SIZE) {
++      for (k_cur = k_nobat; k_cur < k_end; k_cur += PAGE_SIZE) {
+               pmd_t *pmd = pmd_off_k(k_cur);
+-              void *va = block + k_cur - k_start;
+-              pte_t pte = pfn_pte(PHYS_PFN(__pa(va)), PAGE_KERNEL);
++              pte_t pte = pfn_pte(PHYS_PFN(phys + k_cur - k_nobat), PAGE_KERNEL);
+ 
+               __set_pte_at(&init_mm, k_cur, pte_offset_kernel(pmd, k_cur), pte, 0);
+       }
+       flush_tlb_kernel_range(k_start, k_end);
++      memset(kasan_mem_to_shadow(start), 0, k_end - k_start);
++
+       return 0;
+ }
+diff --git a/arch/powerpc/net/bpf_jit_comp64.c b/arch/powerpc/net/bpf_jit_comp64.c
+index 8936090acb579..0d47514e8870d 100644
+--- a/arch/powerpc/net/bpf_jit_comp64.c
++++ b/arch/powerpc/net/bpf_jit_comp64.c
+@@ -651,17 +651,21 @@ bpf_alu32_trunc:
+                               EMIT(PPC_RAW_MR(dst_reg, b2p[TMP_REG_1]));
+                               break;
+                       case 64:
+-                              /*
+-                               * Way easier and faster(?) to store the value
+-                               * into stack and then use ldbrx
+-                               *
+-                               * ctx->seen will be reliable in pass2, but
+-                               * the instructions generated will remain the
+-                               * same across all passes
+-                               */
+                              /* Store the value to stack and then use byte-reverse loads */
+                               PPC_BPF_STL(dst_reg, 1, bpf_jit_stack_local(ctx));
+                               EMIT(PPC_RAW_ADDI(b2p[TMP_REG_1], 1, bpf_jit_stack_local(ctx)));
+-                              EMIT(PPC_RAW_LDBRX(dst_reg, 0, b2p[TMP_REG_1]));
++                              if (cpu_has_feature(CPU_FTR_ARCH_206)) {
++                                      EMIT(PPC_RAW_LDBRX(dst_reg, 0, b2p[TMP_REG_1]));
++                              } else {
++                                      EMIT(PPC_RAW_LWBRX(dst_reg, 0, b2p[TMP_REG_1]));
++                                      if (IS_ENABLED(CONFIG_CPU_LITTLE_ENDIAN))
++                                              EMIT(PPC_RAW_SLDI(dst_reg, dst_reg, 32));
++                                      EMIT(PPC_RAW_LI(b2p[TMP_REG_2], 4));
++                                      EMIT(PPC_RAW_LWBRX(b2p[TMP_REG_2], b2p[TMP_REG_2], b2p[TMP_REG_1]));
++                                      if (IS_ENABLED(CONFIG_CPU_BIG_ENDIAN))
++                                              EMIT(PPC_RAW_SLDI(b2p[TMP_REG_2], b2p[TMP_REG_2], 32));
++                                      EMIT(PPC_RAW_OR(dst_reg, dst_reg, b2p[TMP_REG_2]));
++                              }
+                               break;
+                       }
+                       break;
+diff --git a/arch/powerpc/perf/core-book3s.c b/arch/powerpc/perf/core-book3s.c
+index bd34e062bd290..e49aa8fc6a491 100644
+--- a/arch/powerpc/perf/core-book3s.c
++++ b/arch/powerpc/perf/core-book3s.c
+@@ -1273,9 +1273,20 @@ static void power_pmu_disable(struct pmu *pmu)
+                * Otherwise provide a warning if there is PMI pending, but
+                * no counter is found overflown.
+                */
+-              if (any_pmc_overflown(cpuhw))
+-                      clear_pmi_irq_pending();
+-              else
++              if (any_pmc_overflown(cpuhw)) {
++                      /*
++                       * Since power_pmu_disable runs under local_irq_save, it
++                       * could happen that code hits a PMC overflow without PMI
++                       * pending in paca. Hence only clear PMI pending if it was
++                       * set.
++                       *
++                       * If a PMI is pending, then MSR[EE] must be disabled (because
++                       * the masked PMI handler disabling EE). So it is safe to
++                       * call clear_pmi_irq_pending().
++                       */
++                      if (pmi_irq_pending())
++                              clear_pmi_irq_pending();
++              } else
+                       WARN_ON(pmi_irq_pending());
+ 
+               val = mmcra = cpuhw->mmcr.mmcra;
+diff --git a/arch/s390/hypfs/hypfs_vm.c b/arch/s390/hypfs/hypfs_vm.c
+index e1fcc03159ef2..a927adccb4ba7 100644
+--- a/arch/s390/hypfs/hypfs_vm.c
++++ b/arch/s390/hypfs/hypfs_vm.c
+@@ -20,6 +20,7 @@
+ 
+ static char local_guest[] = "        ";
+ static char all_guests[] = "*       ";
++static char *all_groups = all_guests;
+ static char *guest_query;
+ 
+ struct diag2fc_data {
+@@ -62,10 +63,11 @@ static int diag2fc(int size, char* query, void *addr)
+ 
+       memcpy(parm_list.userid, query, NAME_LEN);
+       ASCEBC(parm_list.userid, NAME_LEN);
+-      parm_list.addr = (unsigned long) addr ;
++      memcpy(parm_list.aci_grp, all_groups, NAME_LEN);
++      ASCEBC(parm_list.aci_grp, NAME_LEN);
++      parm_list.addr = (unsigned long)addr;
+       parm_list.size = size;
+       parm_list.fmt = 0x02;
+-      memset(parm_list.aci_grp, 0x40, NAME_LEN);
+       rc = -1;
+ 
+       diag_stat_inc(DIAG_STAT_X2FC);
+diff --git a/arch/s390/kernel/module.c b/arch/s390/kernel/module.c
+index 4055f1c498147..b81bc96216b97 100644
+--- a/arch/s390/kernel/module.c
++++ b/arch/s390/kernel/module.c
+@@ -30,7 +30,7 @@
+ #define DEBUGP(fmt , ...)
+ #endif
+ 
+-#define PLT_ENTRY_SIZE 20
++#define PLT_ENTRY_SIZE 22
+ 
+ void *module_alloc(unsigned long size)
+ {
+@@ -330,27 +330,26 @@ static int apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
+       case R_390_PLTOFF32:    /* 32 bit offset from GOT to PLT. */
+       case R_390_PLTOFF64:    /* 16 bit offset from GOT to PLT. */
+               if (info->plt_initialized == 0) {
+-                      unsigned int insn[5];
+-                      unsigned int *ip = me->core_layout.base +
+-                                         me->arch.plt_offset +
+-                                         info->plt_offset;
+-
+-                      insn[0] = 0x0d10e310;   /* basr 1,0  */
+-                      insn[1] = 0x100a0004;   /* lg   1,10(1) */
++                      unsigned char insn[PLT_ENTRY_SIZE];
++                      char *plt_base;
++                      char *ip;
++
++                      plt_base = me->core_layout.base + me->arch.plt_offset;
++                      ip = plt_base + info->plt_offset;
++                      *(int *)insn = 0x0d10e310;      /* basr 1,0  */
++                      *(int *)&insn[4] = 0x100c0004;  /* lg   1,12(1) */
+                       if (IS_ENABLED(CONFIG_EXPOLINE) && !nospec_disable) {
+-                              unsigned int *ij;
+-                              ij = me->core_layout.base +
+-                                      me->arch.plt_offset +
+-                                      me->arch.plt_size - PLT_ENTRY_SIZE;
+-                              insn[2] = 0xa7f40000 +  /* j __jump_r1 */
+-                                      (unsigned int)(u16)
+-                                      (((unsigned long) ij - 8 -
+-                                        (unsigned long) ip) / 2);
++                              char *jump_r1;
++
++                              jump_r1 = plt_base + me->arch.plt_size -
++                                      PLT_ENTRY_SIZE;
++                              /* brcl 0xf,__jump_r1 */
++                              *(short *)&insn[8] = 0xc0f4;
++                              *(int *)&insn[10] = (jump_r1 - (ip + 8)) / 2;
+                       } else {
+-                              insn[2] = 0x07f10000;   /* br %r1 */
++                              *(int *)&insn[8] = 0x07f10000;  /* br %r1 */
+                       }
+-                      insn[3] = (unsigned int) (val >> 32);
+-                      insn[4] = (unsigned int) val;
++                      *(long *)&insn[14] = val;
+ 
+                       write(ip, insn, sizeof(insn));
+                       info->plt_initialized = 1;
+diff --git a/arch/x86/events/intel/uncore_snbep.c b/arch/x86/events/intel/uncore_snbep.c
+index ba26792d96731..03c8047bebb38 100644
+--- a/arch/x86/events/intel/uncore_snbep.c
++++ b/arch/x86/events/intel/uncore_snbep.c
+@@ -5239,7 +5239,7 @@ static struct intel_uncore_type icx_uncore_imc = {
+       .fixed_ctr_bits = 48,
+       .fixed_ctr      = SNR_IMC_MMIO_PMON_FIXED_CTR,
+       .fixed_ctl      = SNR_IMC_MMIO_PMON_FIXED_CTL,
+-      .event_descs    = hswep_uncore_imc_events,
++      .event_descs    = snr_uncore_imc_events,
+       .perf_ctr       = SNR_IMC_MMIO_PMON_CTR0,
+       .event_ctl      = SNR_IMC_MMIO_PMON_CTL0,
+       .event_mask     = SNBEP_PMON_RAW_EVENT_MASK,
+diff --git a/arch/x86/kernel/cpu/mce/amd.c b/arch/x86/kernel/cpu/mce/amd.c
+index 0c6b02dd744c1..f73f1184b1c13 100644
+--- a/arch/x86/kernel/cpu/mce/amd.c
++++ b/arch/x86/kernel/cpu/mce/amd.c
+@@ -387,7 +387,7 @@ static void threshold_restart_bank(void *_tr)
+       u32 hi, lo;
+ 
+       /* sysfs write might race against an offline operation */
+-      if (this_cpu_read(threshold_banks))
++      if (!this_cpu_read(threshold_banks) && !tr->set_lvt_off)
+               return;
+ 
+       rdmsr(tr->b->address, lo, hi);
+diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
+index 5e1d7396a6b8a..2e6332af98aba 100644
+--- a/arch/x86/kvm/svm/svm.c
++++ b/arch/x86/kvm/svm/svm.c
+@@ -4146,13 +4146,6 @@ static bool svm_can_emulate_instruction(struct kvm_vcpu *vcpu, void *insn, int i
+       if (likely(!insn || insn_len))
+               return true;
+ 
+-      /*
+-       * If RIP is invalid, go ahead with emulation which will cause an
+-       * internal error exit.
+-       */
+-      if (!kvm_vcpu_gfn_to_memslot(vcpu, kvm_rip_read(vcpu) >> PAGE_SHIFT))
+-              return true;
+-
+       cr4 = kvm_read_cr4(vcpu);
+       smep = cr4 & X86_CR4_SMEP;
+       smap = cr4 & X86_CR4_SMAP;
+diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
+index 271669dc8d90a..7871b8e84b368 100644
+--- a/arch/x86/kvm/x86.c
++++ b/arch/x86/kvm/x86.c
+@@ -3171,6 +3171,7 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
+               if (data & ~supported_xss)
+                       return 1;
+               vcpu->arch.ia32_xss = data;
++              kvm_update_cpuid_runtime(vcpu);
+               break;
+       case MSR_SMI_COUNT:
+               if (!msr_info->host_initiated)
+diff --git a/block/bio.c b/block/bio.c
+index 0703a208ca248..f8d26ce7b61b0 100644
+--- a/block/bio.c
++++ b/block/bio.c
+@@ -575,7 +575,8 @@ void bio_truncate(struct bio *bio, unsigned new_size)
+                               offset = new_size - done;
+                       else
+                               offset = 0;
+-                      zero_user(bv.bv_page, offset, bv.bv_len - offset);
++                      zero_user(bv.bv_page, bv.bv_offset + offset,
++                                bv.bv_len - offset);
+                       truncated = true;
+               }
+               done += bv.bv_len;
+diff --git a/drivers/firmware/efi/efi.c b/drivers/firmware/efi/efi.c
+index 847f33ffc4aed..9fa86288b78a9 100644
+--- a/drivers/firmware/efi/efi.c
++++ b/drivers/firmware/efi/efi.c
+@@ -719,6 +719,13 @@ void __init efi_systab_report_header(const efi_table_hdr_t *systab_hdr,
+               systab_hdr->revision >> 16,
+               systab_hdr->revision & 0xffff,
+               vendor);
++
++      if (IS_ENABLED(CONFIG_X86_64) &&
++          systab_hdr->revision > EFI_1_10_SYSTEM_TABLE_REVISION &&
++          !strcmp(vendor, "Apple")) {
++              pr_info("Apple Mac detected, using EFI v1.10 runtime services only\n");
++              efi.runtime_version = EFI_1_10_SYSTEM_TABLE_REVISION;
++      }
+ }
+ 
+ static __initdata char memory_type_name[][13] = {
+diff --git a/drivers/firmware/efi/libstub/arm64-stub.c b/drivers/firmware/efi/libstub/arm64-stub.c
+index c1b57dfb12776..415a971e76947 100644
+--- a/drivers/firmware/efi/libstub/arm64-stub.c
++++ b/drivers/firmware/efi/libstub/arm64-stub.c
+@@ -119,9 +119,9 @@ efi_status_t handle_kernel_image(unsigned long *image_addr,
+       if (image->image_base != _text)
+               efi_err("FIRMWARE BUG: efi_loaded_image_t::image_base has bogus value\n");
+ 
+-      if (!IS_ALIGNED((u64)_text, EFI_KIMG_ALIGN))
+-              efi_err("FIRMWARE BUG: kernel image not aligned on %ldk boundary\n",
+-                      EFI_KIMG_ALIGN >> 10);
++      if (!IS_ALIGNED((u64)_text, SEGMENT_ALIGN))
++              efi_err("FIRMWARE BUG: kernel image not aligned on %dk boundary\n",
++                      SEGMENT_ALIGN >> 10);
+ 
+       kernel_size = _edata - _text;
+       kernel_memsize = kernel_size + (_end - _edata);
+diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c b/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c
+index ed2c50011d445..ddf539f26f2da 100644
+--- a/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c
++++ b/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c
+@@ -469,8 +469,8 @@ int etnaviv_ioctl_gem_submit(struct drm_device *dev, void *data,
+               return -EINVAL;
+       }
+ 
+-      if (args->stream_size > SZ_64K || args->nr_relocs > SZ_64K ||
+-          args->nr_bos > SZ_64K || args->nr_pmrs > 128) {
++      if (args->stream_size > SZ_128K || args->nr_relocs > SZ_128K ||
++          args->nr_bos > SZ_128K || args->nr_pmrs > 128) {
+               DRM_ERROR("submit arguments out of size limits\n");
+               return -EINVAL;
+       }
+diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dspp.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dspp.c
+index a7a24539921f3..a6efc11eba93f 100644
+--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dspp.c
++++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dspp.c
+@@ -26,9 +26,16 @@ static void dpu_setup_dspp_pcc(struct dpu_hw_dspp *ctx,
+               struct dpu_hw_pcc_cfg *cfg)
+ {
+ 
+-      u32 base = ctx->cap->sblk->pcc.base;
++      u32 base;
+ 
+-      if (!ctx || !base) {
++      if (!ctx) {
++              DRM_ERROR("invalid ctx %pK\n", ctx);
++              return;
++      }
++
++      base = ctx->cap->sblk->pcc.base;
++
++      if (!base) {
+               DRM_ERROR("invalid ctx %pK pcc base 0x%x\n", ctx, base);
+               return;
+       }
+diff --git a/drivers/gpu/drm/msm/dsi/dsi.c b/drivers/gpu/drm/msm/dsi/dsi.c
+index 1adead764feed..f845333593daa 100644
+--- a/drivers/gpu/drm/msm/dsi/dsi.c
++++ b/drivers/gpu/drm/msm/dsi/dsi.c
+@@ -33,7 +33,12 @@ static int dsi_get_phy(struct msm_dsi *msm_dsi)
+ 
+       of_node_put(phy_node);
+ 
+-      if (!phy_pdev || !msm_dsi->phy) {
++      if (!phy_pdev) {
++              DRM_DEV_ERROR(&pdev->dev, "%s: phy driver is not ready\n", __func__);
++              return -EPROBE_DEFER;
++      }
++      if (!msm_dsi->phy) {
++              put_device(&phy_pdev->dev);
+               DRM_DEV_ERROR(&pdev->dev, "%s: phy driver is not ready\n", __func__);
+               return -EPROBE_DEFER;
+       }
+diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c b/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c
+index e8c1a727179cc..e07986ab52c22 100644
+--- a/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c
++++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c
+@@ -769,12 +769,14 @@ void __exit msm_dsi_phy_driver_unregister(void)
+ int msm_dsi_phy_enable(struct msm_dsi_phy *phy, int src_pll_id,
+                       struct msm_dsi_phy_clk_request *clk_req)
+ {
+-      struct device *dev = &phy->pdev->dev;
++      struct device *dev;
+       int ret;
+ 
+       if (!phy || !phy->cfg->ops.enable)
+               return -EINVAL;
+ 
++      dev = &phy->pdev->dev;
++
+       ret = dsi_phy_enable_resource(phy);
+       if (ret) {
+               DRM_DEV_ERROR(dev, "%s: resource enable failed, %d\n",
+diff --git a/drivers/gpu/drm/msm/hdmi/hdmi.c b/drivers/gpu/drm/msm/hdmi/hdmi.c
+index 737453b6e5966..94f948ef279d1 100644
+--- a/drivers/gpu/drm/msm/hdmi/hdmi.c
++++ b/drivers/gpu/drm/msm/hdmi/hdmi.c
+@@ -97,10 +97,15 @@ static int msm_hdmi_get_phy(struct hdmi *hdmi)
+ 
+       of_node_put(phy_node);
+ 
+-      if (!phy_pdev || !hdmi->phy) {
++      if (!phy_pdev) {
+               DRM_DEV_ERROR(&pdev->dev, "phy driver is not ready\n");
+               return -EPROBE_DEFER;
+       }
++      if (!hdmi->phy) {
++              DRM_DEV_ERROR(&pdev->dev, "phy driver is not ready\n");
++              put_device(&phy_pdev->dev);
++              return -EPROBE_DEFER;
++      }
+ 
+       hdmi->phy_dev = get_device(&phy_pdev->dev);
+ 
+diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c
+index 33e42b2f9cfcb..e37e5afc680a2 100644
+--- a/drivers/gpu/drm/msm/msm_drv.c
++++ b/drivers/gpu/drm/msm/msm_drv.c
+@@ -350,7 +350,7 @@ static int msm_init_vram(struct drm_device *dev)
+               of_node_put(node);
+               if (ret)
+                       return ret;
+-              size = r.end - r.start;
++              size = r.end - r.start + 1;
+               DRM_INFO("using VRAM carveout: %lx@%pa\n", size, &r.start);
+ 
+               /* if we have no IOMMU, then we need to use carveout allocator.
+diff --git a/drivers/hwmon/lm90.c b/drivers/hwmon/lm90.c
+index 959446b0137bc..a7142c32889c0 100644
+--- a/drivers/hwmon/lm90.c
++++ b/drivers/hwmon/lm90.c
+@@ -373,7 +373,7 @@ static const struct lm90_params lm90_params[] = {
+               .flags = LM90_HAVE_OFFSET | LM90_HAVE_REM_LIMIT_EXT
+                 | LM90_HAVE_BROKEN_ALERT | LM90_HAVE_CRIT,
+               .alert_alarms = 0x7c,
+-              .max_convrate = 8,
++              .max_convrate = 7,
+       },
+       [lm86] = {
+               .flags = LM90_HAVE_OFFSET | LM90_HAVE_REM_LIMIT_EXT
+@@ -394,12 +394,13 @@ static const struct lm90_params lm90_params[] = {
+               .max_convrate = 9,
+       },
+       [max6646] = {
+-              .flags = LM90_HAVE_CRIT,
++              .flags = LM90_HAVE_CRIT | LM90_HAVE_BROKEN_ALERT,
+               .alert_alarms = 0x7c,
+               .max_convrate = 6,
+               .reg_local_ext = MAX6657_REG_R_LOCAL_TEMPL,
+       },
+       [max6654] = {
++              .flags = LM90_HAVE_BROKEN_ALERT,
+               .alert_alarms = 0x7c,
+               .max_convrate = 7,
+               .reg_local_ext = MAX6657_REG_R_LOCAL_TEMPL,
+@@ -418,7 +419,7 @@ static const struct lm90_params lm90_params[] = {
+       },
+       [max6680] = {
+               .flags = LM90_HAVE_OFFSET | LM90_HAVE_CRIT
+-                | LM90_HAVE_CRIT_ALRM_SWP,
++                | LM90_HAVE_CRIT_ALRM_SWP | LM90_HAVE_BROKEN_ALERT,
+               .alert_alarms = 0x7c,
+               .max_convrate = 7,
+       },
+diff --git a/drivers/media/platform/qcom/venus/core.c b/drivers/media/platform/qcom/venus/core.c
+index 1d621f7769035..62d11c6e41d60 100644
+--- a/drivers/media/platform/qcom/venus/core.c
++++ b/drivers/media/platform/qcom/venus/core.c
+@@ -375,8 +375,6 @@ static int venus_remove(struct platform_device *pdev)
+ 
+       hfi_destroy(core);
+ 
+-      v4l2_device_unregister(&core->v4l2_dev);
+-
+       mutex_destroy(&core->pm_lock);
+       mutex_destroy(&core->lock);
+       venus_dbgfs_deinit(core);
+diff --git a/drivers/mtd/nand/raw/mpc5121_nfc.c b/drivers/mtd/nand/raw/mpc5121_nfc.c
+index cb293c50acb87..5b9271b9c3265 100644
+--- a/drivers/mtd/nand/raw/mpc5121_nfc.c
++++ b/drivers/mtd/nand/raw/mpc5121_nfc.c
+@@ -291,7 +291,6 @@ static int ads5121_chipselect_init(struct mtd_info *mtd)
+ /* Control chips select signal on ADS5121 board */
+ static void ads5121_select_chip(struct nand_chip *nand, int chip)
+ {
+-      struct mtd_info *mtd = nand_to_mtd(nand);
+       struct mpc5121_nfc_prv *prv = nand_get_controller_data(nand);
+       u8 v;
+ 
+diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c 
b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
+index 6e7da1dc2e8c3..d6580e942724d 100644
+--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
++++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
+@@ -2382,8 +2382,7 @@ static irqreturn_t hclgevf_misc_irq_handle(int irq, void 
*data)
+               break;
+       }
+ 
+-      if (event_cause != HCLGEVF_VECTOR0_EVENT_OTHER)
+-              hclgevf_enable_vector(&hdev->misc_vector, true);
++      hclgevf_enable_vector(&hdev->misc_vector, true);
+ 
+       return IRQ_HANDLED;
+ }
+diff --git a/drivers/net/ethernet/ibm/ibmvnic.c 
b/drivers/net/ethernet/ibm/ibmvnic.c
+index 4f99d97638248..c7be7ab131b19 100644
+--- a/drivers/net/ethernet/ibm/ibmvnic.c
++++ b/drivers/net/ethernet/ibm/ibmvnic.c
+@@ -3401,11 +3401,25 @@ static void send_request_cap(struct ibmvnic_adapter 
*adapter, int retry)
+       struct device *dev = &adapter->vdev->dev;
+       union ibmvnic_crq crq;
+       int max_entries;
++      int cap_reqs;
++
++      /* We send out 6 or 7 REQUEST_CAPABILITY CRQs below (depending on
++       * the PROMISC flag). Initialize this count upfront. When the tasklet
++       * receives a response to all of these, it will send the next protocol
++       * message (QUERY_IP_OFFLOAD).
++       */
++      if (!(adapter->netdev->flags & IFF_PROMISC) ||
++          adapter->promisc_supported)
++              cap_reqs = 7;
++      else
++              cap_reqs = 6;
+ 
+       if (!retry) {
+               /* Sub-CRQ entries are 32 byte long */
+               int entries_page = 4 * PAGE_SIZE / (sizeof(u64) * 4);
+ 
++              atomic_set(&adapter->running_cap_crqs, cap_reqs);
++
+               if (adapter->min_tx_entries_per_subcrq > entries_page ||
+                   adapter->min_rx_add_entries_per_subcrq > entries_page) {
+                       dev_err(dev, "Fatal, invalid entries per sub-crq\n");
+@@ -3466,44 +3480,45 @@ static void send_request_cap(struct ibmvnic_adapter 
*adapter, int retry)
+                                       adapter->opt_rx_comp_queues;
+ 
+               adapter->req_rx_add_queues = adapter->max_rx_add_queues;
++      } else {
++              atomic_add(cap_reqs, &adapter->running_cap_crqs);
+       }
+-
+       memset(&crq, 0, sizeof(crq));
+       crq.request_capability.first = IBMVNIC_CRQ_CMD;
+       crq.request_capability.cmd = REQUEST_CAPABILITY;
+ 
+       crq.request_capability.capability = cpu_to_be16(REQ_TX_QUEUES);
+       crq.request_capability.number = cpu_to_be64(adapter->req_tx_queues);
+-      atomic_inc(&adapter->running_cap_crqs);
++      cap_reqs--;
+       ibmvnic_send_crq(adapter, &crq);
+ 
+       crq.request_capability.capability = cpu_to_be16(REQ_RX_QUEUES);
+       crq.request_capability.number = cpu_to_be64(adapter->req_rx_queues);
+-      atomic_inc(&adapter->running_cap_crqs);
++      cap_reqs--;
+       ibmvnic_send_crq(adapter, &crq);
+ 
+       crq.request_capability.capability = cpu_to_be16(REQ_RX_ADD_QUEUES);
+       crq.request_capability.number = cpu_to_be64(adapter->req_rx_add_queues);
+-      atomic_inc(&adapter->running_cap_crqs);
++      cap_reqs--;
+       ibmvnic_send_crq(adapter, &crq);
+ 
+       crq.request_capability.capability =
+           cpu_to_be16(REQ_TX_ENTRIES_PER_SUBCRQ);
+       crq.request_capability.number =
+           cpu_to_be64(adapter->req_tx_entries_per_subcrq);
+-      atomic_inc(&adapter->running_cap_crqs);
++      cap_reqs--;
+       ibmvnic_send_crq(adapter, &crq);
+ 
+       crq.request_capability.capability =
+           cpu_to_be16(REQ_RX_ADD_ENTRIES_PER_SUBCRQ);
+       crq.request_capability.number =
+           cpu_to_be64(adapter->req_rx_add_entries_per_subcrq);
+-      atomic_inc(&adapter->running_cap_crqs);
++      cap_reqs--;
+       ibmvnic_send_crq(adapter, &crq);
+ 
+       crq.request_capability.capability = cpu_to_be16(REQ_MTU);
+       crq.request_capability.number = cpu_to_be64(adapter->req_mtu);
+-      atomic_inc(&adapter->running_cap_crqs);
++      cap_reqs--;
+       ibmvnic_send_crq(adapter, &crq);
+ 
+       if (adapter->netdev->flags & IFF_PROMISC) {
+@@ -3511,16 +3526,21 @@ static void send_request_cap(struct ibmvnic_adapter 
*adapter, int retry)
+                       crq.request_capability.capability =
+                           cpu_to_be16(PROMISC_REQUESTED);
+                       crq.request_capability.number = cpu_to_be64(1);
+-                      atomic_inc(&adapter->running_cap_crqs);
++                      cap_reqs--;
+                       ibmvnic_send_crq(adapter, &crq);
+               }
+       } else {
+               crq.request_capability.capability =
+                   cpu_to_be16(PROMISC_REQUESTED);
+               crq.request_capability.number = cpu_to_be64(0);
+-              atomic_inc(&adapter->running_cap_crqs);
++              cap_reqs--;
+               ibmvnic_send_crq(adapter, &crq);
+       }
++
++      /* Keep at end to catch any discrepancy between expected and actual
++       * CRQs sent.
++       */
++      WARN_ON(cap_reqs != 0);
+ }
+ 
+ static int pending_scrq(struct ibmvnic_adapter *adapter,
+@@ -3953,118 +3973,132 @@ static void send_query_map(struct ibmvnic_adapter 
*adapter)
+ static void send_query_cap(struct ibmvnic_adapter *adapter)
+ {
+       union ibmvnic_crq crq;
++      int cap_reqs;
++
++      /* We send out 25 QUERY_CAPABILITY CRQs below.  Initialize this count
++       * upfront. When the tasklet receives a response to all of these, it
++       * can send out the next protocol messaage (REQUEST_CAPABILITY).
++       */
++      cap_reqs = 25;
++
++      atomic_set(&adapter->running_cap_crqs, cap_reqs);
+ 
+-      atomic_set(&adapter->running_cap_crqs, 0);
+       memset(&crq, 0, sizeof(crq));
+       crq.query_capability.first = IBMVNIC_CRQ_CMD;
+       crq.query_capability.cmd = QUERY_CAPABILITY;
+ 
+       crq.query_capability.capability = cpu_to_be16(MIN_TX_QUEUES);
+-      atomic_inc(&adapter->running_cap_crqs);
+       ibmvnic_send_crq(adapter, &crq);
++      cap_reqs--;
+ 
+       crq.query_capability.capability = cpu_to_be16(MIN_RX_QUEUES);
+-      atomic_inc(&adapter->running_cap_crqs);
+       ibmvnic_send_crq(adapter, &crq);
++      cap_reqs--;
+ 
+       crq.query_capability.capability = cpu_to_be16(MIN_RX_ADD_QUEUES);
+-      atomic_inc(&adapter->running_cap_crqs);
+       ibmvnic_send_crq(adapter, &crq);
++      cap_reqs--;
+ 
+       crq.query_capability.capability = cpu_to_be16(MAX_TX_QUEUES);
+-      atomic_inc(&adapter->running_cap_crqs);
+       ibmvnic_send_crq(adapter, &crq);
++      cap_reqs--;
+ 
+       crq.query_capability.capability = cpu_to_be16(MAX_RX_QUEUES);
+-      atomic_inc(&adapter->running_cap_crqs);
+       ibmvnic_send_crq(adapter, &crq);
++      cap_reqs--;
+ 
+       crq.query_capability.capability = cpu_to_be16(MAX_RX_ADD_QUEUES);
+-      atomic_inc(&adapter->running_cap_crqs);
+       ibmvnic_send_crq(adapter, &crq);
++      cap_reqs--;
+ 
+       crq.query_capability.capability =
+           cpu_to_be16(MIN_TX_ENTRIES_PER_SUBCRQ);
+-      atomic_inc(&adapter->running_cap_crqs);
+       ibmvnic_send_crq(adapter, &crq);
++      cap_reqs--;
+ 
+       crq.query_capability.capability =
+           cpu_to_be16(MIN_RX_ADD_ENTRIES_PER_SUBCRQ);
+-      atomic_inc(&adapter->running_cap_crqs);
+       ibmvnic_send_crq(adapter, &crq);
++      cap_reqs--;
+ 
+       crq.query_capability.capability =
+           cpu_to_be16(MAX_TX_ENTRIES_PER_SUBCRQ);
+-      atomic_inc(&adapter->running_cap_crqs);
+       ibmvnic_send_crq(adapter, &crq);
++      cap_reqs--;
+ 
+       crq.query_capability.capability =
+           cpu_to_be16(MAX_RX_ADD_ENTRIES_PER_SUBCRQ);
+-      atomic_inc(&adapter->running_cap_crqs);
+       ibmvnic_send_crq(adapter, &crq);
++      cap_reqs--;
+ 
+       crq.query_capability.capability = cpu_to_be16(TCP_IP_OFFLOAD);
+-      atomic_inc(&adapter->running_cap_crqs);
+       ibmvnic_send_crq(adapter, &crq);
++      cap_reqs--;
+ 
+       crq.query_capability.capability = cpu_to_be16(PROMISC_SUPPORTED);
+-      atomic_inc(&adapter->running_cap_crqs);
+       ibmvnic_send_crq(adapter, &crq);
++      cap_reqs--;
+ 
+       crq.query_capability.capability = cpu_to_be16(MIN_MTU);
+-      atomic_inc(&adapter->running_cap_crqs);
+       ibmvnic_send_crq(adapter, &crq);
++      cap_reqs--;
+ 
+       crq.query_capability.capability = cpu_to_be16(MAX_MTU);
+-      atomic_inc(&adapter->running_cap_crqs);
+       ibmvnic_send_crq(adapter, &crq);
++      cap_reqs--;
+ 
+       crq.query_capability.capability = cpu_to_be16(MAX_MULTICAST_FILTERS);
+-      atomic_inc(&adapter->running_cap_crqs);
+       ibmvnic_send_crq(adapter, &crq);
++      cap_reqs--;
+ 
+       crq.query_capability.capability = cpu_to_be16(VLAN_HEADER_INSERTION);
+-      atomic_inc(&adapter->running_cap_crqs);
+       ibmvnic_send_crq(adapter, &crq);
++      cap_reqs--;
+ 
+       crq.query_capability.capability = cpu_to_be16(RX_VLAN_HEADER_INSERTION);
+-      atomic_inc(&adapter->running_cap_crqs);
+       ibmvnic_send_crq(adapter, &crq);
++      cap_reqs--;
+ 
+       crq.query_capability.capability = cpu_to_be16(MAX_TX_SG_ENTRIES);
+-      atomic_inc(&adapter->running_cap_crqs);
+       ibmvnic_send_crq(adapter, &crq);
++      cap_reqs--;
+ 
+       crq.query_capability.capability = cpu_to_be16(RX_SG_SUPPORTED);
+-      atomic_inc(&adapter->running_cap_crqs);
+       ibmvnic_send_crq(adapter, &crq);
++      cap_reqs--;
+ 
+       crq.query_capability.capability = cpu_to_be16(OPT_TX_COMP_SUB_QUEUES);
+-      atomic_inc(&adapter->running_cap_crqs);
+       ibmvnic_send_crq(adapter, &crq);
++      cap_reqs--;
+ 
+       crq.query_capability.capability = cpu_to_be16(OPT_RX_COMP_QUEUES);
+-      atomic_inc(&adapter->running_cap_crqs);
+       ibmvnic_send_crq(adapter, &crq);
++      cap_reqs--;
+ 
+       crq.query_capability.capability =
+                       cpu_to_be16(OPT_RX_BUFADD_Q_PER_RX_COMP_Q);
+-      atomic_inc(&adapter->running_cap_crqs);
+       ibmvnic_send_crq(adapter, &crq);
++      cap_reqs--;
+ 
+       crq.query_capability.capability =
+                       cpu_to_be16(OPT_TX_ENTRIES_PER_SUBCRQ);
+-      atomic_inc(&adapter->running_cap_crqs);
+       ibmvnic_send_crq(adapter, &crq);
++      cap_reqs--;
+ 
+       crq.query_capability.capability =
+                       cpu_to_be16(OPT_RXBA_ENTRIES_PER_SUBCRQ);
+-      atomic_inc(&adapter->running_cap_crqs);
+       ibmvnic_send_crq(adapter, &crq);
++      cap_reqs--;
+ 
+       crq.query_capability.capability = cpu_to_be16(TX_RX_DESC_REQ);
+-      atomic_inc(&adapter->running_cap_crqs);
++
+       ibmvnic_send_crq(adapter, &crq);
++      cap_reqs--;
++
++      /* Keep at end to catch any discrepancy between expected and actual
++       * CRQs sent.
++       */
++      WARN_ON(cap_reqs != 0);
+ }
+ 
+ static void send_query_ip_offload(struct ibmvnic_adapter *adapter)
+@@ -4369,6 +4403,8 @@ static void handle_request_cap_rsp(union ibmvnic_crq 
*crq,
+       char *name;
+ 
+       atomic_dec(&adapter->running_cap_crqs);
++      netdev_dbg(adapter->netdev, "Outstanding request-caps: %d\n",
++                 atomic_read(&adapter->running_cap_crqs));
+       switch (be16_to_cpu(crq->request_capability_rsp.capability)) {
+       case REQ_TX_QUEUES:
+               req_value = &adapter->req_tx_queues;
+@@ -5039,12 +5075,6 @@ static void ibmvnic_tasklet(struct tasklet_struct *t)
+                       ibmvnic_handle_crq(crq, adapter);
+                       crq->generic.first = 0;
+               }
+-
+-              /* remain in tasklet until all
+-               * capabilities responses are received
+-               */
+-              if (!adapter->wait_capability)
+-                      done = true;
+       }
+       /* if capabilities CRQ's were sent in this tasklet, the following
+        * tasklet must wait until all responses are received
+diff --git a/drivers/net/ethernet/intel/i40e/i40e.h 
b/drivers/net/ethernet/intel/i40e/i40e.h
+index 5b83d1bc0e74d..effdc3361266f 100644
+--- a/drivers/net/ethernet/intel/i40e/i40e.h
++++ b/drivers/net/ethernet/intel/i40e/i40e.h
+@@ -172,7 +172,6 @@ enum i40e_interrupt_policy {
+ 
+ struct i40e_lump_tracking {
+       u16 num_entries;
+-      u16 search_hint;
+       u16 list[0];
+ #define I40E_PILE_VALID_BIT  0x8000
+ #define I40E_IWARP_IRQ_PILE_ID  (I40E_PILE_VALID_BIT - 2)
+@@ -755,12 +754,12 @@ struct i40e_vsi {
+       struct rtnl_link_stats64 net_stats_offsets;
+       struct i40e_eth_stats eth_stats;
+       struct i40e_eth_stats eth_stats_offsets;
+-      u32 tx_restart;
+-      u32 tx_busy;
++      u64 tx_restart;
++      u64 tx_busy;
+       u64 tx_linearize;
+       u64 tx_force_wb;
+-      u32 rx_buf_failed;
+-      u32 rx_page_failed;
++      u64 rx_buf_failed;
++      u64 rx_page_failed;
+ 
+       /* These are containers of ring pointers, allocated at run-time */
+       struct i40e_ring **rx_rings;
+diff --git a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c 
b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
+index 714b578b2b49c..1114a15a9ce3c 100644
+--- a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
++++ b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
+@@ -240,7 +240,7 @@ static void i40e_dbg_dump_vsi_seid(struct i40e_pf *pf, int 
seid)
+                (unsigned long int)vsi->net_stats_offsets.rx_compressed,
+                (unsigned long int)vsi->net_stats_offsets.tx_compressed);
+       dev_info(&pf->pdev->dev,
+-               "    tx_restart = %d, tx_busy = %d, rx_buf_failed = %d, 
rx_page_failed = %d\n",
++               "    tx_restart = %llu, tx_busy = %llu, rx_buf_failed = %llu, 
rx_page_failed = %llu\n",
+                vsi->tx_restart, vsi->tx_busy,
+                vsi->rx_buf_failed, vsi->rx_page_failed);
+       rcu_read_lock();
+diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c 
b/drivers/net/ethernet/intel/i40e/i40e_main.c
+index f888a443a067b..bd18a780a0008 100644
+--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
++++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
+@@ -195,10 +195,6 @@ int i40e_free_virt_mem_d(struct i40e_hw *hw, struct 
i40e_virt_mem *mem)
+  * @id: an owner id to stick on the items assigned
+  *
+  * Returns the base item index of the lump, or negative for error
+- *
+- * The search_hint trick and lack of advanced fit-finding only work
+- * because we're highly likely to have all the same size lump requests.
+- * Linear search time and any fragmentation should be minimal.
+  **/
+ static int i40e_get_lump(struct i40e_pf *pf, struct i40e_lump_tracking *pile,
+                        u16 needed, u16 id)
+@@ -213,8 +209,21 @@ static int i40e_get_lump(struct i40e_pf *pf, struct 
i40e_lump_tracking *pile,
+               return -EINVAL;
+       }
+ 
+-      /* start the linear search with an imperfect hint */
+-      i = pile->search_hint;
++      /* Allocate last queue in the pile for FDIR VSI queue
++       * so it doesn't fragment the qp_pile
++       */
++      if (pile == pf->qp_pile && pf->vsi[id]->type == I40E_VSI_FDIR) {
++              if (pile->list[pile->num_entries - 1] & I40E_PILE_VALID_BIT) {
++                      dev_err(&pf->pdev->dev,
++                              "Cannot allocate queue %d for I40E_VSI_FDIR\n",
++                              pile->num_entries - 1);
++                      return -ENOMEM;
++              }
++              pile->list[pile->num_entries - 1] = id | I40E_PILE_VALID_BIT;
++              return pile->num_entries - 1;
++      }
++
++      i = 0;
+       while (i < pile->num_entries) {
+               /* skip already allocated entries */
+               if (pile->list[i] & I40E_PILE_VALID_BIT) {
+@@ -233,7 +242,6 @@ static int i40e_get_lump(struct i40e_pf *pf, struct 
i40e_lump_tracking *pile,
+                       for (j = 0; j < needed; j++)
+                               pile->list[i+j] = id | I40E_PILE_VALID_BIT;
+                       ret = i;
+-                      pile->search_hint = i + j;
+                       break;
+               }
+ 
+@@ -256,7 +264,7 @@ static int i40e_put_lump(struct i40e_lump_tracking *pile, 
u16 index, u16 id)
+ {
+       int valid_id = (id | I40E_PILE_VALID_BIT);
+       int count = 0;
+-      int i;
++      u16 i;
+ 
+       if (!pile || index >= pile->num_entries)
+               return -EINVAL;
+@@ -268,8 +276,6 @@ static int i40e_put_lump(struct i40e_lump_tracking *pile, 
u16 index, u16 id)
+               count++;
+       }
+ 
+-      if (count && index < pile->search_hint)
+-              pile->search_hint = index;
+ 
+       return count;
+ }
+@@ -771,9 +777,9 @@ static void i40e_update_vsi_stats(struct i40e_vsi *vsi)
+       struct rtnl_link_stats64 *ns;   /* netdev stats */
+       struct i40e_eth_stats *oes;
+       struct i40e_eth_stats *es;     /* device's eth stats */
+-      u32 tx_restart, tx_busy;
++      u64 tx_restart, tx_busy;
+       struct i40e_ring *p;
+-      u32 rx_page, rx_buf;
++      u64 rx_page, rx_buf;
+       u64 bytes, packets;
+       unsigned int start;
+       u64 tx_linearize;
+@@ -10130,15 +10136,9 @@ static void i40e_rebuild(struct i40e_pf *pf, bool 
reinit, bool lock_acquired)
+       }
+       i40e_get_oem_version(&pf->hw);
+ 
+-      if (test_bit(__I40E_EMP_RESET_INTR_RECEIVED, pf->state) &&
+-          ((hw->aq.fw_maj_ver == 4 && hw->aq.fw_min_ver <= 33) ||
+-           hw->aq.fw_maj_ver < 4) && hw->mac.type == I40E_MAC_XL710) {
+-              /* The following delay is necessary for 4.33 firmware and older
+-               * to recover after EMP reset. 200 ms should suffice but we
+-               * put here 300 ms to be sure that FW is ready to operate
+-               * after reset.
+-               */
+-              mdelay(300);
++      if (test_and_clear_bit(__I40E_EMP_RESET_INTR_RECEIVED, pf->state)) {
++              /* The following delay is necessary for firmware update. */
++              mdelay(1000);
+       }
+ 
+       /* re-verify the eeprom if we just had an EMP reset */
+@@ -11327,7 +11327,6 @@ static int i40e_init_interrupt_scheme(struct i40e_pf 
*pf)
+               return -ENOMEM;
+ 
+       pf->irq_pile->num_entries = vectors;
+-      pf->irq_pile->search_hint = 0;
+ 
+       /* track first vector for misc interrupts, ignore return */
+       (void)i40e_get_lump(pf, pf->irq_pile, 1, I40E_PILE_VALID_BIT - 1);
+@@ -12130,7 +12129,6 @@ static int i40e_sw_init(struct i40e_pf *pf)
+               goto sw_init_done;
+       }
+       pf->qp_pile->num_entries = pf->hw.func_caps.num_tx_qp;
+-      pf->qp_pile->search_hint = 0;
+ 
+       pf->tx_timeout_recovery_level = 1;
+ 
+diff --git a/drivers/net/ethernet/intel/i40e/i40e_register.h 
b/drivers/net/ethernet/intel/i40e/i40e_register.h
+index 564df22f3f463..8335f151ceefc 100644
+--- a/drivers/net/ethernet/intel/i40e/i40e_register.h
++++ b/drivers/net/ethernet/intel/i40e/i40e_register.h
+@@ -279,6 +279,9 @@
+ #define I40E_VFINT_DYN_CTLN(_INTVF) (0x00024800 + ((_INTVF) * 4)) /* 
_i=0...511 */ /* Reset: VFR */
+ #define I40E_VFINT_DYN_CTLN_CLEARPBA_SHIFT 1
+ #define I40E_VFINT_DYN_CTLN_CLEARPBA_MASK I40E_MASK(0x1, 
I40E_VFINT_DYN_CTLN_CLEARPBA_SHIFT)
++#define I40E_VFINT_ICR0_ADMINQ_SHIFT 30
++#define I40E_VFINT_ICR0_ADMINQ_MASK I40E_MASK(0x1, 
I40E_VFINT_ICR0_ADMINQ_SHIFT)
++#define I40E_VFINT_ICR0_ENA(_VF) (0x0002C000 + ((_VF) * 4)) /* _i=0...127 */ 
/* Reset: CORER */
+ #define I40E_VPINT_AEQCTL(_VF) (0x0002B800 + ((_VF) * 4)) /* _i=0...127 */ /* 
Reset: CORER */
+ #define I40E_VPINT_AEQCTL_MSIX_INDX_SHIFT 0
+ #define I40E_VPINT_AEQCTL_ITR_INDX_SHIFT 11
+diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c 
b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
+index 65c4c4fd359fa..f71b7334e2955 100644
+--- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
++++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
+@@ -1323,6 +1323,32 @@ static i40e_status 
i40e_config_vf_promiscuous_mode(struct i40e_vf *vf,
+       return aq_ret;
+ }
+ 
++/**
++ * i40e_sync_vfr_reset
++ * @hw: pointer to hw struct
++ * @vf_id: VF identifier
++ *
++ * Before trigger hardware reset, we need to know if no other process has
++ * reserved the hardware for any reset operations. This check is done by
++ * examining the status of the RSTAT1 register used to signal the reset.
++ **/
++static int i40e_sync_vfr_reset(struct i40e_hw *hw, int vf_id)
++{
++      u32 reg;
++      int i;
++
++      for (i = 0; i < I40E_VFR_WAIT_COUNT; i++) {
++              reg = rd32(hw, I40E_VFINT_ICR0_ENA(vf_id)) &
++                         I40E_VFINT_ICR0_ADMINQ_MASK;
++              if (reg)
++                      return 0;
++
++              usleep_range(100, 200);
++      }
++
++      return -EAGAIN;
++}
++
+ /**
+  * i40e_trigger_vf_reset
+  * @vf: pointer to the VF structure
+@@ -1337,9 +1363,11 @@ static void i40e_trigger_vf_reset(struct i40e_vf *vf, 
bool flr)
+       struct i40e_pf *pf = vf->pf;
+       struct i40e_hw *hw = &pf->hw;
+       u32 reg, reg_idx, bit_idx;
++      bool vf_active;
++      u32 radq;
+ 
+       /* warn the VF */
+-      clear_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states);
++      vf_active = test_and_clear_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states);
+ 
+       /* Disable VF's configuration API during reset. The flag is re-enabled
+        * in i40e_alloc_vf_res(), when it's safe again to access VF's VSI.
+@@ -1353,7 +1381,19 @@ static void i40e_trigger_vf_reset(struct i40e_vf *vf, 
bool flr)
+        * just need to clean up, so don't hit the VFRTRIG register.
+        */
+       if (!flr) {
+-              /* reset VF using VPGEN_VFRTRIG reg */
++              /* Sync VFR reset before trigger next one */
++              radq = rd32(hw, I40E_VFINT_ICR0_ENA(vf->vf_id)) &
++                          I40E_VFINT_ICR0_ADMINQ_MASK;
++              if (vf_active && !radq)
++                      /* waiting for finish reset by virtual driver */
++                      if (i40e_sync_vfr_reset(hw, vf->vf_id))
++                              dev_info(&pf->pdev->dev,
++                                       "Reset VF %d never finished\n",
++                              vf->vf_id);
++
++              /* Reset VF using VPGEN_VFRTRIG reg. It is also setting
++               * in progress state in rstat1 register.
++               */
+               reg = rd32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id));
+               reg |= I40E_VPGEN_VFRTRIG_VFSWR_MASK;
+               wr32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id), reg);
+@@ -2563,6 +2603,59 @@ error_param:
+                                      aq_ret);
+ }
+ 
++/**
++ * i40e_check_enough_queue - find big enough queue number
++ * @vf: pointer to the VF info
++ * @needed: the number of items needed
++ *
++ * Returns the base item index of the queue, or negative for error
++ **/
++static int i40e_check_enough_queue(struct i40e_vf *vf, u16 needed)
++{
++      unsigned int  i, cur_queues, more, pool_size;
++      struct i40e_lump_tracking *pile;
++      struct i40e_pf *pf = vf->pf;
++      struct i40e_vsi *vsi;
++
++      vsi = pf->vsi[vf->lan_vsi_idx];
++      cur_queues = vsi->alloc_queue_pairs;
++
++      /* if current allocated queues are enough for need */
++      if (cur_queues >= needed)
++              return vsi->base_queue;
++
++      pile = pf->qp_pile;
++      if (cur_queues > 0) {
++              /* if the allocated queues are not zero
++               * just check if there are enough queues for more
++               * behind the allocated queues.
++               */
++              more = needed - cur_queues;
++              for (i = vsi->base_queue + cur_queues;
++                      i < pile->num_entries; i++) {
++                      if (pile->list[i] & I40E_PILE_VALID_BIT)
++                              break;
++
++                      if (more-- == 1)
++                              /* there is enough */
++                              return vsi->base_queue;
++              }
++      }
++
++      pool_size = 0;
++      for (i = 0; i < pile->num_entries; i++) {
++              if (pile->list[i] & I40E_PILE_VALID_BIT) {
++                      pool_size = 0;
++                      continue;
++              }
++              if (needed <= ++pool_size)
++                      /* there is enough */
++                      return i;
++      }
++
++      return -ENOMEM;
++}
++
+ /**
+  * i40e_vc_request_queues_msg
+  * @vf: pointer to the VF info
+@@ -2597,6 +2690,12 @@ static int i40e_vc_request_queues_msg(struct i40e_vf 
*vf, u8 *msg)
+                        req_pairs - cur_pairs,
+                        pf->queues_left);
+               vfres->num_queue_pairs = pf->queues_left + cur_pairs;
++      } else if (i40e_check_enough_queue(vf, req_pairs) < 0) {
++              dev_warn(&pf->pdev->dev,
++                       "VF %d requested %d more queues, but there is not 
enough for it.\n",
++                       vf->vf_id,
++                       req_pairs - cur_pairs);
++              vfres->num_queue_pairs = cur_pairs;
+       } else {
+               /* successful request */
+               vf->num_req_queues = req_pairs;
+diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h 
b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h
+index 49575a640a84c..03c42fd0fea19 100644
+--- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h
++++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h
+@@ -19,6 +19,7 @@
+ #define I40E_MAX_VF_PROMISC_FLAGS     3
+ 
+ #define I40E_VF_STATE_WAIT_COUNT      20
++#define I40E_VFR_WAIT_COUNT           100
+ 
+ /* Various queue ctrls */
+ enum i40e_queue_ctrl {
+diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c 
b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
+index 044a5b1196acb..161174be51c31 100644
+--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
++++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
+@@ -386,7 +386,12 @@ static int otx2_forward_vf_mbox_msgs(struct otx2_nic *pf,
+               dst_mdev->msg_size = mbox_hdr->msg_size;
+               dst_mdev->num_msgs = num_msgs;
+               err = otx2_sync_mbox_msg(dst_mbox);
+-              if (err) {
++              /* Error code -EIO indicate there is a communication failure
++               * to the AF. Rest of the error codes indicate that AF processed
++               * VF messages and set the error codes in response messages
++               * (if any) so simply forward responses to VF.
++               */
++              if (err == -EIO) {
+                       dev_warn(pf->dev,
+                                "AF not responding to VF%d messages\n", vf);
+                       /* restore PF mbase and exit */
+diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c 
b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+index a8c5492cb39be..6d8a839fab22e 100644
+--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
++++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+@@ -816,8 +816,6 @@ static int stmmac_init_ptp(struct stmmac_priv *priv)
+       priv->hwts_tx_en = 0;
+       priv->hwts_rx_en = 0;
+ 
+-      stmmac_ptp_register(priv);
+-
+       return 0;
+ }
+ 
+@@ -2691,7 +2689,7 @@ static void stmmac_safety_feat_configuration(struct 
stmmac_priv *priv)
+ /**
+  * stmmac_hw_setup - setup mac in a usable state.
+  *  @dev : pointer to the device structure.
+- *  @init_ptp: initialize PTP if set
++ *  @ptp_register: register PTP if set
+  *  Description:
+  *  this is the main function to setup the HW in a usable state because the
+  *  dma engine is reset, the core registers are configured (e.g. AXI,
+@@ -2701,7 +2699,7 @@ static void stmmac_safety_feat_configuration(struct 
stmmac_priv *priv)
+  *  0 on success and an appropriate (-)ve integer as defined in errno.h
+  *  file on failure.
+  */
+-static int stmmac_hw_setup(struct net_device *dev, bool init_ptp)
++static int stmmac_hw_setup(struct net_device *dev, bool ptp_register)
+ {
+       struct stmmac_priv *priv = netdev_priv(dev);
+       u32 rx_cnt = priv->plat->rx_queues_to_use;
+@@ -2757,13 +2755,13 @@ static int stmmac_hw_setup(struct net_device *dev, 
bool init_ptp)
+ 
+       stmmac_mmc_setup(priv);
+ 
+-      if (init_ptp) {
+-              ret = stmmac_init_ptp(priv);
+-              if (ret == -EOPNOTSUPP)
+-                      netdev_warn(priv->dev, "PTP not supported by HW\n");
+-              else if (ret)
+-                      netdev_warn(priv->dev, "PTP init failed\n");
+-      }
++      ret = stmmac_init_ptp(priv);
++      if (ret == -EOPNOTSUPP)
++              netdev_warn(priv->dev, "PTP not supported by HW\n");
++      else if (ret)
++              netdev_warn(priv->dev, "PTP init failed\n");
++      else if (ptp_register)
++              stmmac_ptp_register(priv);
+ 
+       priv->eee_tw_timer = STMMAC_DEFAULT_TWT_LS;
+ 
+diff --git a/drivers/net/ethernet/ti/cpsw_priv.c 
b/drivers/net/ethernet/ti/cpsw_priv.c
+index 424e644724e46..e74f2e95a46eb 100644
+--- a/drivers/net/ethernet/ti/cpsw_priv.c
++++ b/drivers/net/ethernet/ti/cpsw_priv.c
+@@ -1144,7 +1144,7 @@ int cpsw_fill_rx_channels(struct cpsw_priv *priv)
+ static struct page_pool *cpsw_create_page_pool(struct cpsw_common *cpsw,
+                                              int size)
+ {
+-      struct page_pool_params pp_params;
++      struct page_pool_params pp_params = {};
+       struct page_pool *pool;
+ 
+       pp_params.order = 0;
+diff --git a/drivers/net/hamradio/yam.c b/drivers/net/hamradio/yam.c
+index 5ab53e9942f30..5d30b3e1806ab 100644
+--- a/drivers/net/hamradio/yam.c
++++ b/drivers/net/hamradio/yam.c
+@@ -951,9 +951,7 @@ static int yam_ioctl(struct net_device *dev, struct ifreq 
*ifr, int cmd)
+                                sizeof(struct yamdrv_ioctl_mcs));
+               if (IS_ERR(ym))
+                       return PTR_ERR(ym);
+-              if (ym->cmd != SIOCYAMSMCS)
+-                      return -EINVAL;
+-              if (ym->bitrate > YAM_MAXBITRATE) {
++              if (ym->cmd != SIOCYAMSMCS || ym->bitrate > YAM_MAXBITRATE) {
+                       kfree(ym);
+                       return -EINVAL;
+               }
+diff --git a/drivers/net/phy/broadcom.c b/drivers/net/phy/broadcom.c
+index dbed15dc0fe77..644861366d544 100644
+--- a/drivers/net/phy/broadcom.c
++++ b/drivers/net/phy/broadcom.c
+@@ -789,6 +789,7 @@ static struct phy_driver broadcom_drivers[] = {
+       .phy_id_mask    = 0xfffffff0,
+       .name           = "Broadcom BCM54616S",
+       /* PHY_GBIT_FEATURES */
++      .soft_reset     = genphy_soft_reset,
+       .config_init    = bcm54xx_config_init,
+       .config_aneg    = bcm54616s_config_aneg,
+       .ack_interrupt  = bcm_phy_ack_intr,
+diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c
+index 85f3cde5ffd09..d2f6d8107595a 100644
+--- a/drivers/net/phy/phy_device.c
++++ b/drivers/net/phy/phy_device.c
+@@ -1682,6 +1682,9 @@ void phy_detach(struct phy_device *phydev)
+           phy_driver_is_genphy_10g(phydev))
+               device_release_driver(&phydev->mdio.dev);
+ 
++      /* Assert the reset signal */
++      phy_device_reset(phydev, 1);
++
+       /*
+        * The phydev might go away on the put_device() below, so avoid
+        * a use-after-free bug by reading the underlying bus first.
+@@ -1693,9 +1696,6 @@ void phy_detach(struct phy_device *phydev)
+               ndev_owner = dev->dev.parent->driver->owner;
+       if (ndev_owner != bus->owner)
+               module_put(bus->owner);
+-
+-      /* Assert the reset signal */
+-      phy_device_reset(phydev, 1);
+ }
+ EXPORT_SYMBOL(phy_detach);
+ 
+diff --git a/drivers/net/phy/sfp-bus.c b/drivers/net/phy/sfp-bus.c
+index 4cf874fb5c5b4..a05d8372669c1 100644
+--- a/drivers/net/phy/sfp-bus.c
++++ b/drivers/net/phy/sfp-bus.c
+@@ -609,6 +609,11 @@ struct sfp_bus *sfp_bus_find_fwnode(struct fwnode_handle 
*fwnode)
+       else if (ret < 0)
+               return ERR_PTR(ret);
+ 
++      if (!fwnode_device_is_available(ref.fwnode)) {
++              fwnode_handle_put(ref.fwnode);
++              return NULL;
++      }
++
+       bus = sfp_bus_get(ref.fwnode);
+       fwnode_handle_put(ref.fwnode);
+       if (!bus)
+diff --git a/drivers/rpmsg/rpmsg_char.c b/drivers/rpmsg/rpmsg_char.c
+index 4bbbacdbf3bb7..be90d77c5168d 100644
+--- a/drivers/rpmsg/rpmsg_char.c
++++ b/drivers/rpmsg/rpmsg_char.c
+@@ -92,7 +92,7 @@ static int rpmsg_eptdev_destroy(struct device *dev, void 
*data)
+       /* wake up any blocked readers */
+       wake_up_interruptible(&eptdev->readq);
+ 
+-      device_del(&eptdev->dev);
++      cdev_device_del(&eptdev->cdev, &eptdev->dev);
+       put_device(&eptdev->dev);
+ 
+       return 0;
+@@ -332,7 +332,6 @@ static void rpmsg_eptdev_release_device(struct device *dev)
+ 
+       ida_simple_remove(&rpmsg_ept_ida, dev->id);
+       ida_simple_remove(&rpmsg_minor_ida, MINOR(eptdev->dev.devt));
+-      cdev_del(&eptdev->cdev);
+       kfree(eptdev);
+ }
+ 
+@@ -377,19 +376,13 @@ static int rpmsg_eptdev_create(struct rpmsg_ctrldev 
*ctrldev,
+       dev->id = ret;
+       dev_set_name(dev, "rpmsg%d", ret);
+ 
+-      ret = cdev_add(&eptdev->cdev, dev->devt, 1);
++      ret = cdev_device_add(&eptdev->cdev, &eptdev->dev);
+       if (ret)
+               goto free_ept_ida;
+ 
+       /* We can now rely on the release function for cleanup */
+       dev->release = rpmsg_eptdev_release_device;
+ 
+-      ret = device_add(dev);
+-      if (ret) {
+-              dev_err(dev, "device_add failed: %d\n", ret);
+-              put_device(dev);
+-      }
+-
+       return ret;
+ 
+ free_ept_ida:
+@@ -458,7 +451,6 @@ static void rpmsg_ctrldev_release_device(struct device 
*dev)
+ 
+       ida_simple_remove(&rpmsg_ctrl_ida, dev->id);
+       ida_simple_remove(&rpmsg_minor_ida, MINOR(dev->devt));
+-      cdev_del(&ctrldev->cdev);
+       kfree(ctrldev);
+ }
+ 
+@@ -493,19 +485,13 @@ static int rpmsg_chrdev_probe(struct rpmsg_device *rpdev)
+       dev->id = ret;
+       dev_set_name(&ctrldev->dev, "rpmsg_ctrl%d", ret);
+ 
+-      ret = cdev_add(&ctrldev->cdev, dev->devt, 1);
++      ret = cdev_device_add(&ctrldev->cdev, &ctrldev->dev);
+       if (ret)
+               goto free_ctrl_ida;
+ 
+       /* We can now rely on the release function for cleanup */
+       dev->release = rpmsg_ctrldev_release_device;
+ 
+-      ret = device_add(dev);
+-      if (ret) {
+-              dev_err(&rpdev->dev, "device_add failed: %d\n", ret);
+-              put_device(dev);
+-      }
+-
+       dev_set_drvdata(&rpdev->dev, ctrldev);
+ 
+       return ret;
+@@ -531,7 +517,7 @@ static void rpmsg_chrdev_remove(struct rpmsg_device *rpdev)
+       if (ret)
+               dev_warn(&rpdev->dev, "failed to nuke endpoints: %d\n", ret);
+ 
+-      device_del(&ctrldev->dev);
++      cdev_device_del(&ctrldev->cdev, &ctrldev->dev);
+       put_device(&ctrldev->dev);
+ }
+ 
+diff --git a/drivers/s390/scsi/zfcp_fc.c b/drivers/s390/scsi/zfcp_fc.c
+index d24cafe02708f..511bf8e0a436c 100644
+--- a/drivers/s390/scsi/zfcp_fc.c
++++ b/drivers/s390/scsi/zfcp_fc.c
+@@ -521,6 +521,8 @@ static void zfcp_fc_adisc_handler(void *data)
+               goto out;
+       }
+ 
++      /* re-init to undo drop from zfcp_fc_adisc() */
++      port->d_id = ntoh24(adisc_resp->adisc_port_id);
+       /* port is good, unblock rport without going through erp */
+       zfcp_scsi_schedule_rport_register(port);
+  out:
+@@ -534,6 +536,7 @@ static int zfcp_fc_adisc(struct zfcp_port *port)
+       struct zfcp_fc_req *fc_req;
+       struct zfcp_adapter *adapter = port->adapter;
+       struct Scsi_Host *shost = adapter->scsi_host;
++      u32 d_id;
+       int ret;
+ 
+       fc_req = kmem_cache_zalloc(zfcp_fc_req_cache, GFP_ATOMIC);
+@@ -558,7 +561,15 @@ static int zfcp_fc_adisc(struct zfcp_port *port)
+       fc_req->u.adisc.req.adisc_cmd = ELS_ADISC;
+       hton24(fc_req->u.adisc.req.adisc_port_id, fc_host_port_id(shost));
+ 
+-      ret = zfcp_fsf_send_els(adapter, port->d_id, &fc_req->ct_els,
++      d_id = port->d_id; /* remember as destination for send els below */
++      /*
++       * Force fresh GID_PN lookup on next port recovery.
++       * Must happen after request setup and before sending request,
++       * to prevent race with port->d_id re-init in zfcp_fc_adisc_handler().
++       */
++      port->d_id = 0;
++
++      ret = zfcp_fsf_send_els(adapter, d_id, &fc_req->ct_els,
+                               ZFCP_FC_CTELS_TMO);
+       if (ret)
+               kmem_cache_free(zfcp_fc_req_cache, fc_req);
+diff --git a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c 
b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
+index 6890bbe04a8c1..052e7879704a5 100644
+--- a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
++++ b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
+@@ -80,7 +80,7 @@ static int bnx2fc_bind_pcidev(struct bnx2fc_hba *hba);
+ static void bnx2fc_unbind_pcidev(struct bnx2fc_hba *hba);
+ static struct fc_lport *bnx2fc_if_create(struct bnx2fc_interface *interface,
+                                 struct device *parent, int npiv);
+-static void bnx2fc_destroy_work(struct work_struct *work);
++static void bnx2fc_port_destroy(struct fcoe_port *port);
+ 
+ static struct bnx2fc_hba *bnx2fc_hba_lookup(struct net_device *phys_dev);
+ static struct bnx2fc_interface *bnx2fc_interface_lookup(struct net_device
+@@ -905,9 +905,6 @@ static void bnx2fc_indicate_netevent(void *context, 
unsigned long event,
+                               __bnx2fc_destroy(interface);
+               }
+               mutex_unlock(&bnx2fc_dev_lock);
+-
+-              /* Ensure ALL destroy work has been completed before return */
+-              flush_workqueue(bnx2fc_wq);
+               return;
+ 
+       default:
+@@ -1213,8 +1210,8 @@ static int bnx2fc_vport_destroy(struct fc_vport *vport)
+       mutex_unlock(&n_port->lp_mutex);
+       bnx2fc_free_vport(interface->hba, port->lport);
+       bnx2fc_port_shutdown(port->lport);
++      bnx2fc_port_destroy(port);
+       bnx2fc_interface_put(interface);
+-      queue_work(bnx2fc_wq, &port->destroy_work);
+       return 0;
+ }
+ 
+@@ -1523,7 +1520,6 @@ static struct fc_lport *bnx2fc_if_create(struct 
bnx2fc_interface *interface,
+       port->lport = lport;
+       port->priv = interface;
+       port->get_netdev = bnx2fc_netdev;
+-      INIT_WORK(&port->destroy_work, bnx2fc_destroy_work);
+ 
+       /* Configure fcoe_port */
+       rc = bnx2fc_lport_config(lport);
+@@ -1651,8 +1647,8 @@ static void __bnx2fc_destroy(struct bnx2fc_interface 
*interface)
+       bnx2fc_interface_cleanup(interface);
+       bnx2fc_stop(interface);
+       list_del(&interface->list);
++      bnx2fc_port_destroy(port);
+       bnx2fc_interface_put(interface);
+-      queue_work(bnx2fc_wq, &port->destroy_work);
+ }
+ 
+ /**
+@@ -1692,15 +1688,12 @@ netdev_err:
+       return rc;
+ }
+ 
+-static void bnx2fc_destroy_work(struct work_struct *work)
++static void bnx2fc_port_destroy(struct fcoe_port *port)
+ {
+-      struct fcoe_port *port;
+       struct fc_lport *lport;
+ 
+-      port = container_of(work, struct fcoe_port, destroy_work);
+       lport = port->lport;
+-
+-      BNX2FC_HBA_DBG(lport, "Entered bnx2fc_destroy_work\n");
++      BNX2FC_HBA_DBG(lport, "Entered %s, destroying lport %p\n", __func__, 
lport);
+ 
+       bnx2fc_if_destroy(lport);
+ }
+@@ -2554,9 +2547,6 @@ static void bnx2fc_ulp_exit(struct cnic_dev *dev)
+                       __bnx2fc_destroy(interface);
+       mutex_unlock(&bnx2fc_dev_lock);
+ 
+-      /* Ensure ALL destroy work has been completed before return */
+-      flush_workqueue(bnx2fc_wq);
+-
+       bnx2fc_ulp_stop(hba);
+       /* unregister cnic device */
+       if (test_and_clear_bit(BNX2FC_CNIC_REGISTERED, &hba->reg_with_cnic))
+diff --git a/drivers/tty/n_gsm.c b/drivers/tty/n_gsm.c
+index d76880ae68c83..b8f8621537720 100644
+--- a/drivers/tty/n_gsm.c
++++ b/drivers/tty/n_gsm.c
+@@ -317,6 +317,7 @@ static struct tty_driver *gsm_tty_driver;
+ #define GSM1_ESCAPE_BITS      0x20
+ #define XON                   0x11
+ #define XOFF                  0x13
++#define ISO_IEC_646_MASK      0x7F
+ 
+ static const struct tty_port_operations gsm_port_ops;
+ 
+@@ -526,7 +527,8 @@ static int gsm_stuff_frame(const u8 *input, u8 *output, 
int len)
+       int olen = 0;
+       while (len--) {
+               if (*input == GSM1_SOF || *input == GSM1_ESCAPE
+-                  || *input == XON || *input == XOFF) {
++                  || (*input & ISO_IEC_646_MASK) == XON
++                  || (*input & ISO_IEC_646_MASK) == XOFF) {
+                       *output++ = GSM1_ESCAPE;
+                       *output++ = *input++ ^ GSM1_ESCAPE_BITS;
+                       olen++;
+diff --git a/drivers/tty/serial/8250/8250_of.c 
b/drivers/tty/serial/8250/8250_of.c
+index 65e9045dafe6d..5595c63c46eaf 100644
+--- a/drivers/tty/serial/8250/8250_of.c
++++ b/drivers/tty/serial/8250/8250_of.c
+@@ -83,8 +83,17 @@ static int of_platform_serial_setup(struct platform_device 
*ofdev,
+               port->mapsize = resource_size(&resource);
+ 
+               /* Check for shifted address mapping */
+-              if (of_property_read_u32(np, "reg-offset", &prop) == 0)
++              if (of_property_read_u32(np, "reg-offset", &prop) == 0) {
++                      if (prop >= port->mapsize) {
++                              dev_warn(&ofdev->dev, "reg-offset %u exceeds 
region size %pa\n",
++                                       prop, &port->mapsize);
++                              ret = -EINVAL;
++                              goto err_unprepare;
++                      }
++
+                       port->mapbase += prop;
++                      port->mapsize -= prop;
++              }
+ 
+               port->iotype = UPIO_MEM;
+               if (of_property_read_u32(np, "reg-io-width", &prop) == 0) {
+diff --git a/drivers/tty/serial/8250/8250_pci.c 
b/drivers/tty/serial/8250/8250_pci.c
+index 019328d644d8b..3a985e953b8e9 100644
+--- a/drivers/tty/serial/8250/8250_pci.c
++++ b/drivers/tty/serial/8250/8250_pci.c
+@@ -5171,8 +5171,30 @@ static const struct pci_device_id serial_pci_tbl[] = {
+       {       PCI_VENDOR_ID_INTASHIELD, PCI_DEVICE_ID_INTASHIELD_IS400,
+               PCI_ANY_ID, PCI_ANY_ID, 0, 0,    /* 135a.0dc0 */
+               pbn_b2_4_115200 },
++      /* Brainboxes Devices */
+       /*
+-       * BrainBoxes UC-260
++      * Brainboxes UC-101
++      */
++      {       PCI_VENDOR_ID_INTASHIELD, 0x0BA1,
++              PCI_ANY_ID, PCI_ANY_ID,
++              0, 0,
++              pbn_b2_2_115200 },
++      /*
++       * Brainboxes UC-235/246
++       */
++      {       PCI_VENDOR_ID_INTASHIELD, 0x0AA1,
++              PCI_ANY_ID, PCI_ANY_ID,
++              0, 0,
++              pbn_b2_1_115200 },
++      /*
++       * Brainboxes UC-257
++       */
++      {       PCI_VENDOR_ID_INTASHIELD, 0x0861,
++              PCI_ANY_ID, PCI_ANY_ID,
++              0, 0,
++              pbn_b2_2_115200 },
++      /*
++       * Brainboxes UC-260/271/701/756
+        */
+       {       PCI_VENDOR_ID_INTASHIELD, 0x0D21,
+               PCI_ANY_ID, PCI_ANY_ID,
+@@ -5180,7 +5202,81 @@ static const struct pci_device_id serial_pci_tbl[] = {
+               pbn_b2_4_115200 },
+       {       PCI_VENDOR_ID_INTASHIELD, 0x0E34,
+               PCI_ANY_ID, PCI_ANY_ID,
+-               PCI_CLASS_COMMUNICATION_MULTISERIAL << 8, 0xffff00,
++              PCI_CLASS_COMMUNICATION_MULTISERIAL << 8, 0xffff00,
++              pbn_b2_4_115200 },
++      /*
++       * Brainboxes UC-268
++       */
++      {       PCI_VENDOR_ID_INTASHIELD, 0x0841,
++              PCI_ANY_ID, PCI_ANY_ID,
++              0, 0,
++              pbn_b2_4_115200 },
++      /*
++       * Brainboxes UC-275/279
++       */
++      {       PCI_VENDOR_ID_INTASHIELD, 0x0881,
++              PCI_ANY_ID, PCI_ANY_ID,
++              0, 0,
++              pbn_b2_8_115200 },
++      /*
++       * Brainboxes UC-302
++       */
++      {       PCI_VENDOR_ID_INTASHIELD, 0x08E1,
++              PCI_ANY_ID, PCI_ANY_ID,
++              0, 0,
++              pbn_b2_2_115200 },
++      /*
++       * Brainboxes UC-310
++       */
++      {       PCI_VENDOR_ID_INTASHIELD, 0x08C1,
++              PCI_ANY_ID, PCI_ANY_ID,
++              0, 0,
++              pbn_b2_2_115200 },
++      /*
++       * Brainboxes UC-313
++       */
++      {       PCI_VENDOR_ID_INTASHIELD, 0x08A3,
++              PCI_ANY_ID, PCI_ANY_ID,
++              0, 0,
++              pbn_b2_2_115200 },
++      /*
++       * Brainboxes UC-320/324
++       */
++      {       PCI_VENDOR_ID_INTASHIELD, 0x0A61,
++              PCI_ANY_ID, PCI_ANY_ID,
++              0, 0,
++              pbn_b2_1_115200 },
++      /*
++       * Brainboxes UC-346
++       */
++      {       PCI_VENDOR_ID_INTASHIELD, 0x0B02,
++              PCI_ANY_ID, PCI_ANY_ID,
++              0, 0,
++              pbn_b2_4_115200 },
++      /*
++       * Brainboxes UC-357
++       */
++      {       PCI_VENDOR_ID_INTASHIELD, 0x0A81,
++              PCI_ANY_ID, PCI_ANY_ID,
++              0, 0,
++              pbn_b2_2_115200 },
++      {       PCI_VENDOR_ID_INTASHIELD, 0x0A83,
++              PCI_ANY_ID, PCI_ANY_ID,
++              0, 0,
++              pbn_b2_2_115200 },
++      /*
++       * Brainboxes UC-368
++       */
++      {       PCI_VENDOR_ID_INTASHIELD, 0x0C41,
++              PCI_ANY_ID, PCI_ANY_ID,
++              0, 0,
++              pbn_b2_4_115200 },
++      /*
++       * Brainboxes UC-420/431
++       */
++      {       PCI_VENDOR_ID_INTASHIELD, 0x0921,
++              PCI_ANY_ID, PCI_ANY_ID,
++              0, 0,
+               pbn_b2_4_115200 },
+       /*
+        * Perle PCI-RAS cards
+diff --git a/drivers/tty/serial/stm32-usart.c 
b/drivers/tty/serial/stm32-usart.c
+index 844059861f9e1..0eadf0547175c 100644
+--- a/drivers/tty/serial/stm32-usart.c
++++ b/drivers/tty/serial/stm32-usart.c
+@@ -574,7 +574,7 @@ static void stm32_usart_start_tx(struct uart_port *port)
+       struct serial_rs485 *rs485conf = &port->rs485;
+       struct circ_buf *xmit = &port->state->xmit;
+ 
+-      if (uart_circ_empty(xmit))
++      if (uart_circ_empty(xmit) && !port->x_char)
+               return;
+ 
+       if (rs485conf->flags & SER_RS485_ENABLED) {
+diff --git a/drivers/usb/common/ulpi.c b/drivers/usb/common/ulpi.c
+index a18d7c4222ddf..82fe8e00a96a3 100644
+--- a/drivers/usb/common/ulpi.c
++++ b/drivers/usb/common/ulpi.c
+@@ -39,8 +39,11 @@ static int ulpi_match(struct device *dev, struct 
device_driver *driver)
+       struct ulpi *ulpi = to_ulpi_dev(dev);
+       const struct ulpi_device_id *id;
+ 
+-      /* Some ULPI devices don't have a vendor id so rely on OF match */
+-      if (ulpi->id.vendor == 0)
++      /*
++       * Some ULPI devices don't have a vendor id
++       * or provide an id_table so rely on OF match.
++       */
++      if (ulpi->id.vendor == 0 || !drv->id_table)
+               return of_driver_match_device(dev, driver);
+ 
+       for (id = drv->id_table; id->vendor; id++)
+diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c
+index b2710015493a5..ddd1d3eef912b 100644
+--- a/drivers/usb/core/hcd.c
++++ b/drivers/usb/core/hcd.c
+@@ -1562,6 +1562,13 @@ int usb_hcd_submit_urb (struct urb *urb, gfp_t 
mem_flags)
+               urb->hcpriv = NULL;
+               INIT_LIST_HEAD(&urb->urb_list);
+               atomic_dec(&urb->use_count);
++              /*
++               * Order the write of urb->use_count above before the read
++               * of urb->reject below.  Pairs with the memory barriers in
++               * usb_kill_urb() and usb_poison_urb().
++               */
++              smp_mb__after_atomic();
++
+               atomic_dec(&urb->dev->urbnum);
+               if (atomic_read(&urb->reject))
+                       wake_up(&usb_kill_urb_queue);
+@@ -1666,6 +1673,13 @@ static void __usb_hcd_giveback_urb(struct urb *urb)
+ 
+       usb_anchor_resume_wakeups(anchor);
+       atomic_dec(&urb->use_count);
++      /*
++       * Order the write of urb->use_count above before the read
++       * of urb->reject below.  Pairs with the memory barriers in
++       * usb_kill_urb() and usb_poison_urb().
++       */
++      smp_mb__after_atomic();
++
+       if (unlikely(atomic_read(&urb->reject)))
+               wake_up(&usb_kill_urb_queue);
+       usb_put_urb(urb);
+diff --git a/drivers/usb/core/urb.c b/drivers/usb/core/urb.c
+index 357b149b20d3a..9c285026f8276 100644
+--- a/drivers/usb/core/urb.c
++++ b/drivers/usb/core/urb.c
+@@ -706,6 +706,12 @@ void usb_kill_urb(struct urb *urb)
+       if (!(urb && urb->dev && urb->ep))
+               return;
+       atomic_inc(&urb->reject);
++      /*
++       * Order the write of urb->reject above before the read
++       * of urb->use_count below.  Pairs with the barriers in
++       * __usb_hcd_giveback_urb() and usb_hcd_submit_urb().
++       */
++      smp_mb__after_atomic();
+ 
+       usb_hcd_unlink_urb(urb, -ENOENT);
+       wait_event(usb_kill_urb_queue, atomic_read(&urb->use_count) == 0);
+@@ -747,6 +753,12 @@ void usb_poison_urb(struct urb *urb)
+       if (!urb)
+               return;
+       atomic_inc(&urb->reject);
++      /*
++       * Order the write of urb->reject above before the read
++       * of urb->use_count below.  Pairs with the barriers in
++       * __usb_hcd_giveback_urb() and usb_hcd_submit_urb().
++       */
++      smp_mb__after_atomic();
+ 
+       if (!urb->dev || !urb->ep)
+               return;
+diff --git a/drivers/usb/gadget/function/f_sourcesink.c 
b/drivers/usb/gadget/function/f_sourcesink.c
+index 282737e4609ce..2c65a9bb3c81b 100644
+--- a/drivers/usb/gadget/function/f_sourcesink.c
++++ b/drivers/usb/gadget/function/f_sourcesink.c
+@@ -583,6 +583,7 @@ static int source_sink_start_ep(struct f_sourcesink *ss, 
bool is_in,
+ 
+       if (is_iso) {
+               switch (speed) {
++              case USB_SPEED_SUPER_PLUS:
+               case USB_SPEED_SUPER:
+                       size = ss->isoc_maxpacket *
+                                       (ss->isoc_mult + 1) *
+diff --git a/drivers/usb/host/xhci-plat.c b/drivers/usb/host/xhci-plat.c
+index c1edcc9b13cec..dc570ce4e8319 100644
+--- a/drivers/usb/host/xhci-plat.c
++++ b/drivers/usb/host/xhci-plat.c
+@@ -437,6 +437,9 @@ static int __maybe_unused xhci_plat_suspend(struct device 
*dev)
+       struct xhci_hcd *xhci = hcd_to_xhci(hcd);
+       int ret;
+ 
++      if (pm_runtime_suspended(dev))
++              pm_runtime_resume(dev);
++
+       ret = xhci_priv_suspend_quirk(hcd);
+       if (ret)
+               return ret;
+diff --git a/drivers/usb/storage/unusual_devs.h 
b/drivers/usb/storage/unusual_devs.h
+index 29191d33c0e3e..1a05e3dcfec8a 100644
+--- a/drivers/usb/storage/unusual_devs.h
++++ b/drivers/usb/storage/unusual_devs.h
+@@ -2301,6 +2301,16 @@ UNUSUAL_DEV(  0x2027, 0xa001, 0x0000, 0x9999,
+               USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_euscsi_init,
+               US_FL_SCM_MULT_TARG ),
+ 
++/*
++ * Reported by DocMAX <[email protected]>
++ * and Thomas Weißschuh <[email protected]>
++ */
++UNUSUAL_DEV( 0x2109, 0x0715, 0x9999, 0x9999,
++              "VIA Labs, Inc.",
++              "VL817 SATA Bridge",
++              USB_SC_DEVICE, USB_PR_DEVICE, NULL,
++              US_FL_IGNORE_UAS),
++
+ UNUSUAL_DEV( 0x2116, 0x0320, 0x0001, 0x0001,
+               "ST",
+               "2A",
+diff --git a/drivers/usb/typec/tcpm/tcpm.c b/drivers/usb/typec/tcpm/tcpm.c
+index 721d9c4ddc81f..8333c80b5f7c1 100644
+--- a/drivers/usb/typec/tcpm/tcpm.c
++++ b/drivers/usb/typec/tcpm/tcpm.c
+@@ -4164,7 +4164,8 @@ static void _tcpm_pd_vbus_off(struct tcpm_port *port)
+       case SNK_TRYWAIT_DEBOUNCE:
+               break;
+       case SNK_ATTACH_WAIT:
+-              tcpm_set_state(port, SNK_UNATTACHED, 0);
++      case SNK_DEBOUNCED:
++              /* Do nothing, as TCPM is still waiting for vbus to reaach 
VSAFE5V to connect */
+               break;
+ 
+       case SNK_NEGOTIATE_CAPABILITIES:
+diff --git a/drivers/usb/typec/ucsi/ucsi_ccg.c 
b/drivers/usb/typec/ucsi/ucsi_ccg.c
+index bff96d64dddff..6db7c8ddd51cd 100644
+--- a/drivers/usb/typec/ucsi/ucsi_ccg.c
++++ b/drivers/usb/typec/ucsi/ucsi_ccg.c
+@@ -325,7 +325,7 @@ static int ucsi_ccg_init(struct ucsi_ccg *uc)
+               if (status < 0)
+                       return status;
+ 
+-              if (!data)
++              if (!(data & DEV_INT))
+                       return 0;
+ 
+               status = ccg_write(uc, CCGX_RAB_INTR_REG, &data, sizeof(data));
+diff --git a/drivers/video/fbdev/hyperv_fb.c b/drivers/video/fbdev/hyperv_fb.c
+index 4dc9077dd2ac0..3c309ab208874 100644
+--- a/drivers/video/fbdev/hyperv_fb.c
++++ b/drivers/video/fbdev/hyperv_fb.c
+@@ -286,8 +286,6 @@ struct hvfb_par {
+ 
+ static uint screen_width = HVFB_WIDTH;
+ static uint screen_height = HVFB_HEIGHT;
+-static uint screen_width_max = HVFB_WIDTH;
+-static uint screen_height_max = HVFB_HEIGHT;
+ static uint screen_depth;
+ static uint screen_fb_size;
+ static uint dio_fb_size; /* FB size for deferred IO */
+@@ -581,7 +579,6 @@ static int synthvid_get_supported_resolution(struct 
hv_device *hdev)
+       int ret = 0;
+       unsigned long t;
+       u8 index;
+-      int i;
+ 
+       memset(msg, 0, sizeof(struct synthvid_msg));
+       msg->vid_hdr.type = SYNTHVID_RESOLUTION_REQUEST;
+@@ -612,13 +609,6 @@ static int synthvid_get_supported_resolution(struct 
hv_device *hdev)
+               goto out;
+       }
+ 
+-      for (i = 0; i < msg->resolution_resp.resolution_count; i++) {
+-              screen_width_max = max_t(unsigned int, screen_width_max,
+-                  msg->resolution_resp.supported_resolution[i].width);
+-              screen_height_max = max_t(unsigned int, screen_height_max,
+-                  msg->resolution_resp.supported_resolution[i].height);
+-      }
+-
+       screen_width =
+               msg->resolution_resp.supported_resolution[index].width;
+       screen_height =
+@@ -940,7 +930,7 @@ static void hvfb_get_option(struct fb_info *info)
+ 
+       if (x < HVFB_WIDTH_MIN || y < HVFB_HEIGHT_MIN ||
+           (synthvid_ver_ge(par->synthvid_version, SYNTHVID_VERSION_WIN10) &&
+-          (x > screen_width_max || y > screen_height_max)) ||
++          (x * y * screen_depth / 8 > screen_fb_size)) ||
+           (par->synthvid_version == SYNTHVID_VERSION_WIN8 &&
+            x * y * screen_depth / 8 > SYNTHVID_FB_SIZE_WIN8) ||
+           (par->synthvid_version == SYNTHVID_VERSION_WIN7 &&
+@@ -1193,8 +1183,8 @@ static int hvfb_probe(struct hv_device *hdev,
+       }
+ 
+       hvfb_get_option(info);
+-      pr_info("Screen resolution: %dx%d, Color depth: %d\n",
+-              screen_width, screen_height, screen_depth);
++      pr_info("Screen resolution: %dx%d, Color depth: %d, Frame buffer size: 
%d\n",
++              screen_width, screen_height, screen_depth, screen_fb_size);
+ 
+       ret = hvfb_getmem(hdev, info);
+       if (ret) {
+diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
+index 040db0dfba264..b5e9bfe884c4b 100644
+--- a/fs/btrfs/ioctl.c
++++ b/fs/btrfs/ioctl.c
+@@ -3103,10 +3103,8 @@ static noinline int btrfs_ioctl_snap_destroy(struct 
file *file,
+       inode_lock(inode);
+       err = btrfs_delete_subvolume(dir, dentry);
+       inode_unlock(inode);
+-      if (!err) {
+-              fsnotify_rmdir(dir, dentry);
+-              d_delete(dentry);
+-      }
++      if (!err)
++              d_delete_notify(dir, dentry);
+ 
+ out_dput:
+       dput(dentry);
+diff --git a/fs/ceph/file.c b/fs/ceph/file.c
+index 8ed881fd7440d..450050801f3b6 100644
+--- a/fs/ceph/file.c
++++ b/fs/ceph/file.c
+@@ -577,6 +577,7 @@ static int ceph_finish_async_create(struct inode *dir, 
struct dentry *dentry,
+       struct ceph_inode_info *ci = ceph_inode(dir);
+       struct inode *inode;
+       struct timespec64 now;
++      struct ceph_string *pool_ns;
+       struct ceph_mds_client *mdsc = ceph_sb_to_mdsc(dir->i_sb);
+       struct ceph_vino vino = { .ino = req->r_deleg_ino,
+                                 .snap = CEPH_NOSNAP };
+@@ -626,6 +627,12 @@ static int ceph_finish_async_create(struct inode *dir, 
struct dentry *dentry,
+       in.max_size = cpu_to_le64(lo->stripe_unit);
+ 
+       ceph_file_layout_to_legacy(lo, &in.layout);
++      /* lo is private, so pool_ns can't change */
++      pool_ns = rcu_dereference_raw(lo->pool_ns);
++      if (pool_ns) {
++              iinfo.pool_ns_len = pool_ns->len;
++              iinfo.pool_ns_data = pool_ns->str;
++      }
+ 
+       down_read(&mdsc->snap_rwsem);
+       ret = ceph_fill_inode(inode, NULL, &iinfo, NULL, req->r_session,
+@@ -743,8 +750,10 @@ retry:
+                               restore_deleg_ino(dir, req->r_deleg_ino);
+                               ceph_mdsc_put_request(req);
+                               try_async = false;
++                              
ceph_put_string(rcu_dereference_raw(lo.pool_ns));
+                               goto retry;
+                       }
++                      ceph_put_string(rcu_dereference_raw(lo.pool_ns));
+                       goto out_req;
+               }
+       }
+diff --git a/fs/configfs/dir.c b/fs/configfs/dir.c
+index b0983e2a4e2c7..32ddad3ec5d53 100644
+--- a/fs/configfs/dir.c
++++ b/fs/configfs/dir.c
+@@ -1805,8 +1805,8 @@ void configfs_unregister_group(struct config_group 
*group)
+       configfs_detach_group(&group->cg_item);
+       d_inode(dentry)->i_flags |= S_DEAD;
+       dont_mount(dentry);
++      d_drop(dentry);
+       fsnotify_rmdir(d_inode(parent), dentry);
+-      d_delete(dentry);
+       inode_unlock(d_inode(parent));
+ 
+       dput(dentry);
+@@ -1947,10 +1947,10 @@ void configfs_unregister_subsystem(struct 
configfs_subsystem *subsys)
+       configfs_detach_group(&group->cg_item);
+       d_inode(dentry)->i_flags |= S_DEAD;
+       dont_mount(dentry);
+-      fsnotify_rmdir(d_inode(root), dentry);
+       inode_unlock(d_inode(dentry));
+ 
+-      d_delete(dentry);
++      d_drop(dentry);
++      fsnotify_rmdir(d_inode(root), dentry);
+ 
+       inode_unlock(d_inode(root));
+ 
+diff --git a/fs/devpts/inode.c b/fs/devpts/inode.c
+index 42e5a766d33c7..4f25015aa5342 100644
+--- a/fs/devpts/inode.c
++++ b/fs/devpts/inode.c
+@@ -621,8 +621,8 @@ void devpts_pty_kill(struct dentry *dentry)
+ 
+       dentry->d_fsdata = NULL;
+       drop_nlink(dentry->d_inode);
+-      fsnotify_unlink(d_inode(dentry->d_parent), dentry);
+       d_drop(dentry);
++      fsnotify_unlink(d_inode(dentry->d_parent), dentry);
+       dput(dentry);   /* d_alloc_name() in devpts_pty_new() */
+ }
+ 
+diff --git a/fs/jbd2/journal.c b/fs/jbd2/journal.c
+index 188f79d769881..b748329bb0bab 100644
+--- a/fs/jbd2/journal.c
++++ b/fs/jbd2/journal.c
+@@ -2795,6 +2795,7 @@ struct journal_head 
*jbd2_journal_grab_journal_head(struct buffer_head *bh)
+       jbd_unlock_bh_journal_head(bh);
+       return jh;
+ }
++EXPORT_SYMBOL(jbd2_journal_grab_journal_head);
+ 
+ static void __journal_remove_journal_head(struct buffer_head *bh)
+ {
+@@ -2847,6 +2848,7 @@ void jbd2_journal_put_journal_head(struct journal_head 
*jh)
+               jbd_unlock_bh_journal_head(bh);
+       }
+ }
++EXPORT_SYMBOL(jbd2_journal_put_journal_head);
+ 
+ /*
+  * Initialize jbd inode head
+diff --git a/fs/namei.c b/fs/namei.c
+index 4c9d0c36545d3..72f354b62dd5d 100644
+--- a/fs/namei.c
++++ b/fs/namei.c
+@@ -3709,13 +3709,12 @@ int vfs_rmdir(struct inode *dir, struct dentry *dentry)
+       dentry->d_inode->i_flags |= S_DEAD;
+       dont_mount(dentry);
+       detach_mounts(dentry);
+-      fsnotify_rmdir(dir, dentry);
+ 
+ out:
+       inode_unlock(dentry->d_inode);
+       dput(dentry);
+       if (!error)
+-              d_delete(dentry);
++              d_delete_notify(dir, dentry);
+       return error;
+ }
+ EXPORT_SYMBOL(vfs_rmdir);
+@@ -3825,7 +3824,6 @@ int vfs_unlink(struct inode *dir, struct dentry *dentry, 
struct inode **delegate
+                       if (!error) {
+                               dont_mount(dentry);
+                               detach_mounts(dentry);
+-                              fsnotify_unlink(dir, dentry);
+                       }
+               }
+       }
+@@ -3833,9 +3831,11 @@ out:
+       inode_unlock(target);
+ 
+       /* We don't d_delete() NFS sillyrenamed files--they still exist. */
+-      if (!error && !(dentry->d_flags & DCACHE_NFSFS_RENAMED)) {
++      if (!error && dentry->d_flags & DCACHE_NFSFS_RENAMED) {
++              fsnotify_unlink(dir, dentry);
++      } else if (!error) {
+               fsnotify_link_count(target);
+-              d_delete(dentry);
++              d_delete_notify(dir, dentry);
+       }
+ 
+       return error;
+diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c
+index 8b963c72dd3b1..a23b7a5dec9ee 100644
+--- a/fs/nfs/dir.c
++++ b/fs/nfs/dir.c
+@@ -1777,6 +1777,24 @@ out:
+ 
+ no_open:
+       res = nfs_lookup(dir, dentry, lookup_flags);
++      if (!res) {
++              inode = d_inode(dentry);
++              if ((lookup_flags & LOOKUP_DIRECTORY) && inode &&
++                  !S_ISDIR(inode->i_mode))
++                      res = ERR_PTR(-ENOTDIR);
++              else if (inode && S_ISREG(inode->i_mode))
++                      res = ERR_PTR(-EOPENSTALE);
++      } else if (!IS_ERR(res)) {
++              inode = d_inode(res);
++              if ((lookup_flags & LOOKUP_DIRECTORY) && inode &&
++                  !S_ISDIR(inode->i_mode)) {
++                      dput(res);
++                      res = ERR_PTR(-ENOTDIR);
++              } else if (inode && S_ISREG(inode->i_mode)) {
++                      dput(res);
++                      res = ERR_PTR(-EOPENSTALE);
++              }
++      }
+       if (switched) {
+               d_lookup_done(dentry);
+               if (!res)
+@@ -2174,6 +2192,8 @@ nfs_link(struct dentry *old_dentry, struct inode *dir, 
struct dentry *dentry)
+ 
+       trace_nfs_link_enter(inode, dir, dentry);
+       d_drop(dentry);
++      if (S_ISREG(inode->i_mode))
++              nfs_sync_inode(inode);
+       error = NFS_PROTO(dir)->link(inode, dir, &dentry->d_name);
+       if (error == 0) {
+               ihold(inode);
+@@ -2262,6 +2282,8 @@ int nfs_rename(struct inode *old_dir, struct dentry 
*old_dentry,
+               }
+       }
+ 
++      if (S_ISREG(old_inode->i_mode))
++              nfs_sync_inode(old_inode);
+       task = nfs_async_rename(old_dir, new_dir, old_dentry, new_dentry, NULL);
+       if (IS_ERR(task)) {
+               error = PTR_ERR(task);
+diff --git a/fs/nfsd/nfsctl.c b/fs/nfsd/nfsctl.c
+index a8f954bbde4f5..dedec4771ecc2 100644
+--- a/fs/nfsd/nfsctl.c
++++ b/fs/nfsd/nfsctl.c
+@@ -1247,7 +1247,8 @@ static void nfsdfs_remove_file(struct inode *dir, struct 
dentry *dentry)
+       clear_ncl(d_inode(dentry));
+       dget(dentry);
+       ret = simple_unlink(dir, dentry);
+-      d_delete(dentry);
++      d_drop(dentry);
++      fsnotify_unlink(dir, dentry);
+       dput(dentry);
+       WARN_ON_ONCE(ret);
+ }
+@@ -1336,8 +1337,8 @@ void nfsd_client_rmdir(struct dentry *dentry)
+       dget(dentry);
+       ret = simple_rmdir(dir, dentry);
+       WARN_ON_ONCE(ret);
++      d_drop(dentry);
+       fsnotify_rmdir(dir, dentry);
+-      d_delete(dentry);
+       dput(dentry);
+       inode_unlock(dir);
+ }
+diff --git a/fs/ocfs2/suballoc.c b/fs/ocfs2/suballoc.c
+index e7d04adb6cb87..4f48003e43271 100644
+--- a/fs/ocfs2/suballoc.c
++++ b/fs/ocfs2/suballoc.c
+@@ -1253,26 +1253,23 @@ static int ocfs2_test_bg_bit_allocatable(struct 
buffer_head *bg_bh,
+ {
+       struct ocfs2_group_desc *bg = (struct ocfs2_group_desc *) bg_bh->b_data;
+       struct journal_head *jh;
+-      int ret = 1;
++      int ret;
+ 
+       if (ocfs2_test_bit(nr, (unsigned long *)bg->bg_bitmap))
+               return 0;
+ 
+-      if (!buffer_jbd(bg_bh))
++      jh = jbd2_journal_grab_journal_head(bg_bh);
++      if (!jh)
+               return 1;
+ 
+-      jbd_lock_bh_journal_head(bg_bh);
+-      if (buffer_jbd(bg_bh)) {
+-              jh = bh2jh(bg_bh);
+-              spin_lock(&jh->b_state_lock);
+-              bg = (struct ocfs2_group_desc *) jh->b_committed_data;
+-              if (bg)
+-                      ret = !ocfs2_test_bit(nr, (unsigned long 
*)bg->bg_bitmap);
+-              else
+-                      ret = 1;
+-              spin_unlock(&jh->b_state_lock);
+-      }
+-      jbd_unlock_bh_journal_head(bg_bh);
++      spin_lock(&jh->b_state_lock);
++      bg = (struct ocfs2_group_desc *) jh->b_committed_data;
++      if (bg)
++              ret = !ocfs2_test_bit(nr, (unsigned long *)bg->bg_bitmap);
++      else
++              ret = 1;
++      spin_unlock(&jh->b_state_lock);
++      jbd2_journal_put_journal_head(jh);
+ 
+       return ret;
+ }
+diff --git a/fs/udf/inode.c b/fs/udf/inode.c
+index 0dd2f93ac0480..d32b836f6ca74 100644
+--- a/fs/udf/inode.c
++++ b/fs/udf/inode.c
+@@ -257,10 +257,6 @@ int udf_expand_file_adinicb(struct inode *inode)
+       char *kaddr;
+       struct udf_inode_info *iinfo = UDF_I(inode);
+       int err;
+-      struct writeback_control udf_wbc = {
+-              .sync_mode = WB_SYNC_NONE,
+-              .nr_to_write = 1,
+-      };
+ 
+       WARN_ON_ONCE(!inode_is_locked(inode));
+       if (!iinfo->i_lenAlloc) {
+@@ -304,8 +300,10 @@ int udf_expand_file_adinicb(struct inode *inode)
+               iinfo->i_alloc_type = ICBTAG_FLAG_AD_LONG;
+       /* from now on we have normal address_space methods */
+       inode->i_data.a_ops = &udf_aops;
++      set_page_dirty(page);
++      unlock_page(page);
+       up_write(&iinfo->i_data_sem);
+-      err = inode->i_data.a_ops->writepage(page, &udf_wbc);
++      err = filemap_fdatawrite(inode->i_mapping);
+       if (err) {
+               /* Restore everything back so that we don't lose data... */
+               lock_page(page);
+@@ -316,6 +314,7 @@ int udf_expand_file_adinicb(struct inode *inode)
+               unlock_page(page);
+               iinfo->i_alloc_type = ICBTAG_FLAG_AD_IN_ICB;
+               inode->i_data.a_ops = &udf_adinicb_aops;
++              iinfo->i_lenAlloc = inode->i_size;
+               up_write(&iinfo->i_data_sem);
+       }
+       put_page(page);
+diff --git a/include/linux/fsnotify.h b/include/linux/fsnotify.h
+index f8acddcf54fb4..79add91eaa04e 100644
+--- a/include/linux/fsnotify.h
++++ b/include/linux/fsnotify.h
+@@ -203,6 +203,42 @@ static inline void fsnotify_link(struct inode *dir, 
struct inode *inode,
+       fsnotify_name(dir, FS_CREATE, inode, &new_dentry->d_name, 0);
+ }
+ 
++/*
++ * fsnotify_delete - @dentry was unlinked and unhashed
++ *
++ * Caller must make sure that dentry->d_name is stable.
++ *
++ * Note: unlike fsnotify_unlink(), we have to pass also the unlinked inode
++ * as this may be called after d_delete() and old_dentry may be negative.
++ */
++static inline void fsnotify_delete(struct inode *dir, struct inode *inode,
++                                 struct dentry *dentry)
++{
++      __u32 mask = FS_DELETE;
++
++      if (S_ISDIR(inode->i_mode))
++              mask |= FS_ISDIR;
++
++      fsnotify_name(dir, mask, inode, &dentry->d_name, 0);
++}
++
++/**
++ * d_delete_notify - delete a dentry and call fsnotify_delete()
++ * @dentry: The dentry to delete
++ *
++ * This helper is used to guaranty that the unlinked inode cannot be found
++ * by lookup of this name after fsnotify_delete() event has been delivered.
++ */
++static inline void d_delete_notify(struct inode *dir, struct dentry *dentry)
++{
++      struct inode *inode = d_inode(dentry);
++
++      ihold(inode);
++      d_delete(dentry);
++      fsnotify_delete(dir, inode, dentry);
++      iput(inode);
++}
++
+ /*
+  * fsnotify_unlink - 'name' was unlinked
+  *
+@@ -210,10 +246,10 @@ static inline void fsnotify_link(struct inode *dir, 
struct inode *inode,
+  */
+ static inline void fsnotify_unlink(struct inode *dir, struct dentry *dentry)
+ {
+-      /* Expected to be called before d_delete() */
+-      WARN_ON_ONCE(d_is_negative(dentry));
++      if (WARN_ON_ONCE(d_is_negative(dentry)))
++              return;
+ 
+-      fsnotify_dirent(dir, dentry, FS_DELETE);
++      fsnotify_delete(dir, d_inode(dentry), dentry);
+ }
+ 
+ /*
+@@ -233,10 +269,10 @@ static inline void fsnotify_mkdir(struct inode *inode, 
struct dentry *dentry)
+  */
+ static inline void fsnotify_rmdir(struct inode *dir, struct dentry *dentry)
+ {
+-      /* Expected to be called before d_delete() */
+-      WARN_ON_ONCE(d_is_negative(dentry));
++      if (WARN_ON_ONCE(d_is_negative(dentry)))
++              return;
+ 
+-      fsnotify_dirent(dir, dentry, FS_DELETE | FS_ISDIR);
++      fsnotify_delete(dir, d_inode(dentry), dentry);
+ }
+ 
+ /*
+diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
+index 3476d20b75d49..fe3155736d635 100644
+--- a/include/linux/netdevice.h
++++ b/include/linux/netdevice.h
+@@ -2543,6 +2543,7 @@ struct packet_type {
+                                             struct net_device *);
+       bool                    (*id_match)(struct packet_type *ptype,
+                                           struct sock *sk);
++      struct net              *af_packet_net;
+       void                    *af_packet_priv;
+       struct list_head        list;
+ };
+diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
+index c94551091dad3..67a50c78232fe 100644
+--- a/include/linux/perf_event.h
++++ b/include/linux/perf_event.h
+@@ -678,18 +678,6 @@ struct perf_event {
+       u64                             total_time_running;
+       u64                             tstamp;
+ 
+-      /*
+-       * timestamp shadows the actual context timing but it can
+-       * be safely used in NMI interrupt context. It reflects the
+-       * context time as it was when the event was last scheduled in,
+-       * or when ctx_sched_in failed to schedule the event because we
+-       * run out of PMC.
+-       *
+-       * ctx_time already accounts for ctx->timestamp. Therefore to
+-       * compute ctx_time for a sample, simply add perf_clock().
+-       */
+-      u64                             shadow_ctx_time;
+-
+       struct perf_event_attr          attr;
+       u16                             header_size;
+       u16                             id_header_size;
+@@ -834,6 +822,7 @@ struct perf_event_context {
+        */
+       u64                             time;
+       u64                             timestamp;
++      u64                             timeoffset;
+ 
+       /*
+        * These fields let us detect when two contexts have both
+@@ -916,6 +905,8 @@ struct bpf_perf_event_data_kern {
+ struct perf_cgroup_info {
+       u64                             time;
+       u64                             timestamp;
++      u64                             timeoffset;
++      int                             active;
+ };
+ 
+ struct perf_cgroup {
+diff --git a/include/linux/usb/role.h b/include/linux/usb/role.h
+index 0164fed31b06c..b9ccaeb8a4aef 100644
+--- a/include/linux/usb/role.h
++++ b/include/linux/usb/role.h
+@@ -90,6 +90,12 @@ fwnode_usb_role_switch_get(struct fwnode_handle *node)
+ 
+ static inline void usb_role_switch_put(struct usb_role_switch *sw) { }
+ 
++static inline struct usb_role_switch *
++usb_role_switch_find_by_fwnode(const struct fwnode_handle *fwnode)
++{
++      return NULL;
++}
++
+ static inline struct usb_role_switch *
+ usb_role_switch_register(struct device *parent,
+                        const struct usb_role_switch_desc *desc)
+diff --git a/include/net/addrconf.h b/include/net/addrconf.h
+index 78ea3e332688f..e7ce719838b5e 100644
+--- a/include/net/addrconf.h
++++ b/include/net/addrconf.h
+@@ -6,6 +6,8 @@
+ #define RTR_SOLICITATION_INTERVAL     (4*HZ)
+ #define RTR_SOLICITATION_MAX_INTERVAL (3600*HZ)       /* 1 hour */
+ 
++#define MIN_VALID_LIFETIME            (2*3600)        /* 2 hours */
++
+ #define TEMP_VALID_LIFETIME           (7*86400)
+ #define TEMP_PREFERRED_LIFETIME               (86400)
+ #define REGEN_MAX_RETRY                       (3)
+diff --git a/include/net/ip.h b/include/net/ip.h
+index 5538e54d4620c..de2dc22a78f93 100644
+--- a/include/net/ip.h
++++ b/include/net/ip.h
+@@ -506,19 +506,18 @@ static inline void ip_select_ident_segs(struct net *net, 
struct sk_buff *skb,
+ {
+       struct iphdr *iph = ip_hdr(skb);
+ 
++      /* We had many attacks based on IPID, use the private
++       * generator as much as we can.
++       */
++      if (sk && inet_sk(sk)->inet_daddr) {
++              iph->id = htons(inet_sk(sk)->inet_id);
++              inet_sk(sk)->inet_id += segs;
++              return;
++      }
+       if ((iph->frag_off & htons(IP_DF)) && !skb->ignore_df) {
+-              /* This is only to work around buggy Windows95/2000
+-               * VJ compression implementations.  If the ID field
+-               * does not change, they drop every other packet in
+-               * a TCP stream using header compression.
+-               */
+-              if (sk && inet_sk(sk)->inet_daddr) {
+-                      iph->id = htons(inet_sk(sk)->inet_id);
+-                      inet_sk(sk)->inet_id += segs;
+-              } else {
+-                      iph->id = 0;
+-              }
++              iph->id = 0;
+       } else {
++              /* Unfortunately we need the big hammer to get a suitable IPID 
*/
+               __ip_select_ident(net, iph, segs);
+       }
+ }
+diff --git a/include/net/ip6_fib.h b/include/net/ip6_fib.h
+index 88bc66b8d02b0..95d93ecf07371 100644
+--- a/include/net/ip6_fib.h
++++ b/include/net/ip6_fib.h
+@@ -280,7 +280,7 @@ static inline bool fib6_get_cookie_safe(const struct 
fib6_info *f6i,
+       fn = rcu_dereference(f6i->fib6_node);
+ 
+       if (fn) {
+-              *cookie = fn->fn_sernum;
++              *cookie = READ_ONCE(fn->fn_sernum);
+               /* pairs with smp_wmb() in fib6_update_sernum_upto_root() */
+               smp_rmb();
+               status = true;
+diff --git a/include/net/route.h b/include/net/route.h
+index ff021cab657e5..a07c277cd33e8 100644
+--- a/include/net/route.h
++++ b/include/net/route.h
+@@ -369,7 +369,7 @@ static inline struct neighbour *ip_neigh_gw4(struct 
net_device *dev,
+ {
+       struct neighbour *neigh;
+ 
+-      neigh = __ipv4_neigh_lookup_noref(dev, daddr);
++      neigh = __ipv4_neigh_lookup_noref(dev, (__force u32)daddr);
+       if (unlikely(!neigh))
+               neigh = __neigh_create(&arp_tbl, &daddr, dev, false);
+ 
+diff --git a/kernel/bpf/stackmap.c b/kernel/bpf/stackmap.c
+index 4477873ac3a0b..56cd7e6589ff3 100644
+--- a/kernel/bpf/stackmap.c
++++ b/kernel/bpf/stackmap.c
+@@ -664,13 +664,14 @@ BPF_CALL_4(bpf_get_task_stack, struct task_struct *, 
task, void *, buf,
+          u32, size, u64, flags)
+ {
+       struct pt_regs *regs;
+-      long res;
++      long res = -EINVAL;
+ 
+       if (!try_get_task_stack(task))
+               return -EFAULT;
+ 
+       regs = task_pt_regs(task);
+-      res = __bpf_get_stack(regs, task, NULL, buf, size, flags);
++      if (regs)
++              res = __bpf_get_stack(regs, task, NULL, buf, size, flags);
+       put_task_stack(task);
+ 
+       return res;
+diff --git a/kernel/events/core.c b/kernel/events/core.c
+index e2d774cc470ee..c6493f7e02359 100644
+--- a/kernel/events/core.c
++++ b/kernel/events/core.c
+@@ -266,7 +266,7 @@ static void event_function_call(struct perf_event *event, 
event_f func, void *da
+       if (!event->parent) {
+               /*
+                * If this is a !child event, we must hold ctx::mutex to
+-               * stabilize the the event->ctx relation. See
++               * stabilize the event->ctx relation. See
+                * perf_event_ctx_lock().
+                */
+               lockdep_assert_held(&ctx->mutex);
+@@ -673,6 +673,23 @@ perf_event_set_state(struct perf_event *event, enum 
perf_event_state state)
+       WRITE_ONCE(event->state, state);
+ }
+ 
++/*
++ * UP store-release, load-acquire
++ */
++
++#define __store_release(ptr, val)                                     \
++do {                                                                  \
++      barrier();                                                      \
++      WRITE_ONCE(*(ptr), (val));                                      \
++} while (0)
++
++#define __load_acquire(ptr)                                           \
++({                                                                    \
++      __unqual_scalar_typeof(*(ptr)) ___p = READ_ONCE(*(ptr));        \
++      barrier();                                                      \
++      ___p;                                                           \
++})
++
+ #ifdef CONFIG_CGROUP_PERF
+ 
+ static inline bool
+@@ -718,34 +735,51 @@ static inline u64 perf_cgroup_event_time(struct 
perf_event *event)
+       return t->time;
+ }
+ 
+-static inline void __update_cgrp_time(struct perf_cgroup *cgrp)
++static inline u64 perf_cgroup_event_time_now(struct perf_event *event, u64 
now)
+ {
+-      struct perf_cgroup_info *info;
+-      u64 now;
+-
+-      now = perf_clock();
++      struct perf_cgroup_info *t;
+ 
+-      info = this_cpu_ptr(cgrp->info);
++      t = per_cpu_ptr(event->cgrp->info, event->cpu);
++      if (!__load_acquire(&t->active))
++              return t->time;
++      now += READ_ONCE(t->timeoffset);
++      return now;
++}
+ 
+-      info->time += now - info->timestamp;
++static inline void __update_cgrp_time(struct perf_cgroup_info *info, u64 now, 
bool adv)
++{
++      if (adv)
++              info->time += now - info->timestamp;
+       info->timestamp = now;
++      /*
++       * see update_context_time()
++       */
++      WRITE_ONCE(info->timeoffset, info->time - info->timestamp);
+ }
+ 
+-static inline void update_cgrp_time_from_cpuctx(struct perf_cpu_context 
*cpuctx)
++static inline void update_cgrp_time_from_cpuctx(struct perf_cpu_context 
*cpuctx, bool final)
+ {
+       struct perf_cgroup *cgrp = cpuctx->cgrp;
+       struct cgroup_subsys_state *css;
++      struct perf_cgroup_info *info;
+ 
+       if (cgrp) {
++              u64 now = perf_clock();
++
+               for (css = &cgrp->css; css; css = css->parent) {
+                       cgrp = container_of(css, struct perf_cgroup, css);
+-                      __update_cgrp_time(cgrp);
++                      info = this_cpu_ptr(cgrp->info);
++
++                      __update_cgrp_time(info, now, true);
++                      if (final)
++                              __store_release(&info->active, 0);
+               }
+       }
+ }
+ 
+ static inline void update_cgrp_time_from_event(struct perf_event *event)
+ {
++      struct perf_cgroup_info *info;
+       struct perf_cgroup *cgrp;
+ 
+       /*
+@@ -759,8 +793,10 @@ static inline void update_cgrp_time_from_event(struct 
perf_event *event)
+       /*
+        * Do not update time when cgroup is not active
+        */
+-      if (cgroup_is_descendant(cgrp->css.cgroup, event->cgrp->css.cgroup))
+-              __update_cgrp_time(event->cgrp);
++      if (cgroup_is_descendant(cgrp->css.cgroup, event->cgrp->css.cgroup)) {
++              info = this_cpu_ptr(event->cgrp->info);
++              __update_cgrp_time(info, perf_clock(), true);
++      }
+ }
+ 
+ static inline void
+@@ -784,7 +820,8 @@ perf_cgroup_set_timestamp(struct task_struct *task,
+       for (css = &cgrp->css; css; css = css->parent) {
+               cgrp = container_of(css, struct perf_cgroup, css);
+               info = this_cpu_ptr(cgrp->info);
+-              info->timestamp = ctx->timestamp;
++              __update_cgrp_time(info, ctx->timestamp, false);
++              __store_release(&info->active, 1);
+       }
+ }
+ 
+@@ -980,14 +1017,6 @@ out:
+       return ret;
+ }
+ 
+-static inline void
+-perf_cgroup_set_shadow_time(struct perf_event *event, u64 now)
+-{
+-      struct perf_cgroup_info *t;
+-      t = per_cpu_ptr(event->cgrp->info, event->cpu);
+-      event->shadow_ctx_time = now - t->timestamp;
+-}
+-
+ static inline void
+ perf_cgroup_event_enable(struct perf_event *event, struct perf_event_context 
*ctx)
+ {
+@@ -1065,7 +1094,8 @@ static inline void update_cgrp_time_from_event(struct 
perf_event *event)
+ {
+ }
+ 
+-static inline void update_cgrp_time_from_cpuctx(struct perf_cpu_context 
*cpuctx)
++static inline void update_cgrp_time_from_cpuctx(struct perf_cpu_context 
*cpuctx,
++                                              bool final)
+ {
+ }
+ 
+@@ -1097,12 +1127,12 @@ perf_cgroup_switch(struct task_struct *task, struct 
task_struct *next)
+ {
+ }
+ 
+-static inline void
+-perf_cgroup_set_shadow_time(struct perf_event *event, u64 now)
++static inline u64 perf_cgroup_event_time(struct perf_event *event)
+ {
++      return 0;
+ }
+ 
+-static inline u64 perf_cgroup_event_time(struct perf_event *event)
++static inline u64 perf_cgroup_event_time_now(struct perf_event *event, u64 
now)
+ {
+       return 0;
+ }
+@@ -1300,7 +1330,7 @@ static void put_ctx(struct perf_event_context *ctx)
+  * life-time rules separate them. That is an exiting task cannot fork, and a
+  * spawning task cannot (yet) exit.
+  *
+- * But remember that that these are parent<->child context relations, and
++ * But remember that these are parent<->child context relations, and
+  * migration does not affect children, therefore these two orderings should 
not
+  * interact.
+  *
+@@ -1439,7 +1469,7 @@ static u64 primary_event_id(struct perf_event *event)
+ /*
+  * Get the perf_event_context for a task and lock it.
+  *
+- * This has to cope with with the fact that until it is locked,
++ * This has to cope with the fact that until it is locked,
+  * the context could get moved to another task.
+  */
+ static struct perf_event_context *
+@@ -1524,22 +1554,59 @@ static void perf_unpin_context(struct 
perf_event_context *ctx)
+ /*
+  * Update the record of the current time in a context.
+  */
+-static void update_context_time(struct perf_event_context *ctx)
++static void __update_context_time(struct perf_event_context *ctx, bool adv)
+ {
+       u64 now = perf_clock();
+ 
+-      ctx->time += now - ctx->timestamp;
++      if (adv)
++              ctx->time += now - ctx->timestamp;
+       ctx->timestamp = now;
++
++      /*
++       * The above: time' = time + (now - timestamp), can be re-arranged
++       * into: time` = now + (time - timestamp), which gives a single value
++       * offset to compute future time without locks on.
++       *
++       * See perf_event_time_now(), which can be used from NMI context where
++       * it's (obviously) not possible to acquire ctx->lock in order to read
++       * both the above values in a consistent manner.
++       */
++      WRITE_ONCE(ctx->timeoffset, ctx->time - ctx->timestamp);
++}
++
++static void update_context_time(struct perf_event_context *ctx)
++{
++      __update_context_time(ctx, true);
+ }
+ 
+ static u64 perf_event_time(struct perf_event *event)
+ {
+       struct perf_event_context *ctx = event->ctx;
+ 
++      if (unlikely(!ctx))
++              return 0;
++
+       if (is_cgroup_event(event))
+               return perf_cgroup_event_time(event);
+ 
+-      return ctx ? ctx->time : 0;
++      return ctx->time;
++}
++
++static u64 perf_event_time_now(struct perf_event *event, u64 now)
++{
++      struct perf_event_context *ctx = event->ctx;
++
++      if (unlikely(!ctx))
++              return 0;
++
++      if (is_cgroup_event(event))
++              return perf_cgroup_event_time_now(event, now);
++
++      if (!(__load_acquire(&ctx->is_active) & EVENT_TIME))
++              return ctx->time;
++
++      now += READ_ONCE(ctx->timeoffset);
++      return now;
+ }
+ 
+ static enum event_type_t get_event_type(struct perf_event *event)
+@@ -2333,7 +2400,7 @@ __perf_remove_from_context(struct perf_event *event,
+ 
+       if (ctx->is_active & EVENT_TIME) {
+               update_context_time(ctx);
+-              update_cgrp_time_from_cpuctx(cpuctx);
++              update_cgrp_time_from_cpuctx(cpuctx, false);
+       }
+ 
+       event_sched_out(event, cpuctx, ctx);
+@@ -2342,6 +2409,9 @@ __perf_remove_from_context(struct perf_event *event,
+       list_del_event(event, ctx);
+ 
+       if (!ctx->nr_events && ctx->is_active) {
++              if (ctx == &cpuctx->ctx)
++                      update_cgrp_time_from_cpuctx(cpuctx, true);
++
+               ctx->is_active = 0;
+               ctx->rotate_necessary = 0;
+               if (ctx->task) {
+@@ -2467,40 +2537,6 @@ void perf_event_disable_inatomic(struct perf_event 
*event)
+       irq_work_queue(&event->pending);
+ }
+ 
+-static void perf_set_shadow_time(struct perf_event *event,
+-                               struct perf_event_context *ctx)
+-{
+-      /*
+-       * use the correct time source for the time snapshot
+-       *
+-       * We could get by without this by leveraging the
+-       * fact that to get to this function, the caller
+-       * has most likely already called update_context_time()
+-       * and update_cgrp_time_xx() and thus both timestamp
+-       * are identical (or very close). Given that tstamp is,
+-       * already adjusted for cgroup, we could say that:
+-       *    tstamp - ctx->timestamp
+-       * is equivalent to
+-       *    tstamp - cgrp->timestamp.
+-       *
+-       * Then, in perf_output_read(), the calculation would
+-       * work with no changes because:
+-       * - event is guaranteed scheduled in
+-       * - no scheduled out in between
+-       * - thus the timestamp would be the same
+-       *
+-       * But this is a bit hairy.
+-       *
+-       * So instead, we have an explicit cgroup call to remain
+-       * within the time time source all along. We believe it
+-       * is cleaner and simpler to understand.
+-       */
+-      if (is_cgroup_event(event))
+-              perf_cgroup_set_shadow_time(event, event->tstamp);
+-      else
+-              event->shadow_ctx_time = event->tstamp - ctx->timestamp;
+-}
+-
+ #define MAX_INTERRUPTS (~0ULL)
+ 
+ static void perf_log_throttle(struct perf_event *event, int enable);
+@@ -2541,8 +2577,6 @@ event_sched_in(struct perf_event *event,
+ 
+       perf_pmu_disable(event->pmu);
+ 
+-      perf_set_shadow_time(event, ctx);
+-
+       perf_log_itrace_start(event);
+ 
+       if (event->pmu->add(event, PERF_EF_START)) {
+@@ -3216,16 +3250,6 @@ static void ctx_sched_out(struct perf_event_context 
*ctx,
+               return;
+       }
+ 
+-      ctx->is_active &= ~event_type;
+-      if (!(ctx->is_active & EVENT_ALL))
+-              ctx->is_active = 0;
+-
+-      if (ctx->task) {
+-              WARN_ON_ONCE(cpuctx->task_ctx != ctx);
+-              if (!ctx->is_active)
+-                      cpuctx->task_ctx = NULL;
+-      }
+-
+       /*
+        * Always update time if it was set; not only when it changes.
+        * Otherwise we can 'forget' to update time for any but the last
+@@ -3239,7 +3263,22 @@ static void ctx_sched_out(struct perf_event_context 
*ctx,
+       if (is_active & EVENT_TIME) {
+               /* update (and stop) ctx time */
+               update_context_time(ctx);
+-              update_cgrp_time_from_cpuctx(cpuctx);
++              update_cgrp_time_from_cpuctx(cpuctx, ctx == &cpuctx->ctx);
++              /*
++               * CPU-release for the below ->is_active store,
++               * see __load_acquire() in perf_event_time_now()
++               */
++              barrier();
++      }
++
++      ctx->is_active &= ~event_type;
++      if (!(ctx->is_active & EVENT_ALL))
++              ctx->is_active = 0;
++
++      if (ctx->task) {
++              WARN_ON_ONCE(cpuctx->task_ctx != ctx);
++              if (!ctx->is_active)
++                      cpuctx->task_ctx = NULL;
+       }
+ 
+       is_active ^= ctx->is_active; /* changed bits */
+@@ -3676,13 +3715,19 @@ static noinline int visit_groups_merge(struct 
perf_cpu_context *cpuctx,
+       return 0;
+ }
+ 
++/*
++ * Because the userpage is strictly per-event (there is no concept of context,
++ * so there cannot be a context indirection), every userpage must be updated
++ * when context time starts :-(
++ *
++ * IOW, we must not miss EVENT_TIME edges.
++ */
+ static inline bool event_update_userpage(struct perf_event *event)
+ {
+       if (likely(!atomic_read(&event->mmap_count)))
+               return false;
+ 
+       perf_event_update_time(event);
+-      perf_set_shadow_time(event, event->ctx);
+       perf_event_update_userpage(event);
+ 
+       return true;
+@@ -3766,13 +3811,23 @@ ctx_sched_in(struct perf_event_context *ctx,
+            struct task_struct *task)
+ {
+       int is_active = ctx->is_active;
+-      u64 now;
+ 
+       lockdep_assert_held(&ctx->lock);
+ 
+       if (likely(!ctx->nr_events))
+               return;
+ 
++      if (is_active ^ EVENT_TIME) {
++              /* start ctx time */
++              __update_context_time(ctx, false);
++              perf_cgroup_set_timestamp(task, ctx);
++              /*
++               * CPU-release for the below ->is_active store,
++               * see __load_acquire() in perf_event_time_now()
++               */
++              barrier();
++      }
++
+       ctx->is_active |= (event_type | EVENT_TIME);
+       if (ctx->task) {
+               if (!is_active)
+@@ -3783,13 +3838,6 @@ ctx_sched_in(struct perf_event_context *ctx,
+ 
+       is_active ^= ctx->is_active; /* changed bits */
+ 
+-      if (is_active & EVENT_TIME) {
+-              /* start ctx time */
+-              now = perf_clock();
+-              ctx->timestamp = now;
+-              perf_cgroup_set_timestamp(task, ctx);
+-      }
+-
+       /*
+        * First go through the list and put on any pinned groups
+        * in order to give them the best chance of going on.
+@@ -4325,6 +4373,18 @@ static inline u64 perf_event_count(struct perf_event 
*event)
+       return local64_read(&event->count) + atomic64_read(&event->child_count);
+ }
+ 
++static void calc_timer_values(struct perf_event *event,
++                              u64 *now,
++                              u64 *enabled,
++                              u64 *running)
++{
++      u64 ctx_time;
++
++      *now = perf_clock();
++      ctx_time = perf_event_time_now(event, *now);
++      __perf_update_times(event, ctx_time, enabled, running);
++}
++
+ /*
+  * NMI-safe method to read a local event, that is an event that
+  * is:
+@@ -4384,10 +4444,9 @@ int perf_event_read_local(struct perf_event *event, u64 
*value,
+ 
+       *value = local64_read(&event->count);
+       if (enabled || running) {
+-              u64 now = event->shadow_ctx_time + perf_clock();
+-              u64 __enabled, __running;
++              u64 __enabled, __running, __now;;
+ 
+-              __perf_update_times(event, now, &__enabled, &__running);
++              calc_timer_values(event, &__now, &__enabled, &__running);
+               if (enabled)
+                       *enabled = __enabled;
+               if (running)
+@@ -5694,18 +5753,6 @@ static int perf_event_index(struct perf_event *event)
+       return event->pmu->event_idx(event);
+ }
+ 
+-static void calc_timer_values(struct perf_event *event,
+-                              u64 *now,
+-                              u64 *enabled,
+-                              u64 *running)
+-{
+-      u64 ctx_time;
+-
+-      *now = perf_clock();
+-      ctx_time = event->shadow_ctx_time + *now;
+-      __perf_update_times(event, ctx_time, enabled, running);
+-}
+-
+ static void perf_event_init_userpage(struct perf_event *event)
+ {
+       struct perf_event_mmap_page *userpg;
+@@ -6245,7 +6292,6 @@ accounting:
+               ring_buffer_attach(event, rb);
+ 
+               perf_event_update_time(event);
+-              perf_set_shadow_time(event, event->ctx);
+               perf_event_init_userpage(event);
+               perf_event_update_userpage(event);
+       } else {
+diff --git a/kernel/events/uprobes.c b/kernel/events/uprobes.c
+index 00b0358739ab3..e1bbb3b92921d 100644
+--- a/kernel/events/uprobes.c
++++ b/kernel/events/uprobes.c
+@@ -1735,7 +1735,7 @@ void uprobe_free_utask(struct task_struct *t)
+ }
+ 
+ /*
+- * Allocate a uprobe_task object for the task if if necessary.
++ * Allocate a uprobe_task object for the task if necessary.
+  * Called when the thread hits a breakpoint.
+  *
+  * Returns:
+diff --git a/kernel/locking/rtmutex.c b/kernel/locking/rtmutex.c
+index 2f8cd616d3b29..f00dd928fc711 100644
+--- a/kernel/locking/rtmutex.c
++++ b/kernel/locking/rtmutex.c
+@@ -1438,7 +1438,7 @@ rt_mutex_fasttrylock(struct rt_mutex *lock,
+ }
+ 
+ /*
+- * Performs the wakeup of the the top-waiter and re-enables preemption.
++ * Performs the wakeup of the top-waiter and re-enables preemption.
+  */
+ void rt_mutex_postunlock(struct wake_q_head *wake_q)
+ {
+@@ -1832,7 +1832,7 @@ struct task_struct *rt_mutex_next_owner(struct rt_mutex 
*lock)
+  *                    been started.
+  * @waiter:           the pre-initialized rt_mutex_waiter
+  *
+- * Wait for the the lock acquisition started on our behalf by
++ * Wait for the lock acquisition started on our behalf by
+  * rt_mutex_start_proxy_lock(). Upon failure, the caller must call
+  * rt_mutex_cleanup_proxy_lock().
+  *
+diff --git a/kernel/locking/rwsem.c b/kernel/locking/rwsem.c
+index a163542d178ee..cc5cc889b5b7f 100644
+--- a/kernel/locking/rwsem.c
++++ b/kernel/locking/rwsem.c
+@@ -1177,7 +1177,7 @@ rwsem_down_write_slowpath(struct rw_semaphore *sem, int 
state)
+ 
+               /*
+                * If there were already threads queued before us and:
+-               *  1) there are no no active locks, wake the front
++               *  1) there are no active locks, wake the front
+                *     queued process(es) as the handoff bit might be set.
+                *  2) there are no active writers and some readers, the lock
+                *     must be read owned; so we try to wake any read lock
+diff --git a/kernel/locking/semaphore.c b/kernel/locking/semaphore.c
+index d9dd94defc0a9..9aa855a96c4ae 100644
+--- a/kernel/locking/semaphore.c
++++ b/kernel/locking/semaphore.c
+@@ -119,7 +119,7 @@ EXPORT_SYMBOL(down_killable);
+  * @sem: the semaphore to be acquired
+  *
+  * Try to acquire the semaphore atomically.  Returns 0 if the semaphore has
+- * been acquired successfully or 1 if it it cannot be acquired.
++ * been acquired successfully or 1 if it cannot be acquired.
+  *
+  * NOTE: This return value is inverted from both spin_trylock and
+  * mutex_trylock!  Be careful about this when converting code.
+diff --git a/kernel/power/wakelock.c b/kernel/power/wakelock.c
+index 105df4dfc7839..52571dcad768b 100644
+--- a/kernel/power/wakelock.c
++++ b/kernel/power/wakelock.c
+@@ -39,23 +39,20 @@ ssize_t pm_show_wakelocks(char *buf, bool show_active)
+ {
+       struct rb_node *node;
+       struct wakelock *wl;
+-      char *str = buf;
+-      char *end = buf + PAGE_SIZE;
++      int len = 0;
+ 
+       mutex_lock(&wakelocks_lock);
+ 
+       for (node = rb_first(&wakelocks_tree); node; node = rb_next(node)) {
+               wl = rb_entry(node, struct wakelock, node);
+               if (wl->ws->active == show_active)
+-                      str += scnprintf(str, end - str, "%s ", wl->name);
++                      len += sysfs_emit_at(buf, len, "%s ", wl->name);
+       }
+-      if (str > buf)
+-              str--;
+ 
+-      str += scnprintf(str, end - str, "\n");
++      len += sysfs_emit_at(buf, len, "\n");
+ 
+       mutex_unlock(&wakelocks_lock);
+-      return (str - buf);
++      return len;
+ }
+ 
+ #if CONFIG_PM_WAKELOCKS_LIMIT > 0
+diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
+index 2a33cb5a10e59..acd9833b8ec22 100644
+--- a/kernel/sched/fair.c
++++ b/kernel/sched/fair.c
+@@ -3379,7 +3379,6 @@ void set_task_rq_fair(struct sched_entity *se,
+       se->avg.last_update_time = n_last_update_time;
+ }
+ 
+-
+ /*
+  * When on migration a sched_entity joins/leaves the PELT hierarchy, we need 
to
+  * propagate its contribution. The key to this propagation is the invariant
+@@ -3447,7 +3446,6 @@ void set_task_rq_fair(struct sched_entity *se,
+  * XXX: only do this for the part of runnable > running ?
+  *
+  */
+-
+ static inline void
+ update_tg_cfs_util(struct cfs_rq *cfs_rq, struct sched_entity *se, struct 
cfs_rq *gcfs_rq)
+ {
+@@ -3676,7 +3674,19 @@ update_cfs_rq_load_avg(u64 now, struct cfs_rq *cfs_rq)
+ 
+               r = removed_util;
+               sub_positive(&sa->util_avg, r);
+-              sa->util_sum = sa->util_avg * divider;
++              sub_positive(&sa->util_sum, r * divider);
++              /*
++               * Because of rounding, se->util_sum might ends up being +1 
more than
++               * cfs->util_sum. Although this is not a problem by itself, 
detaching
++               * a lot of tasks with the rounding problem between 2 updates of
++               * util_avg (~1ms) can make cfs->util_sum becoming null whereas
++               * cfs_util_avg is not.
++               * Check that util_sum is still above its lower bound for the 
new
++               * util_avg. Given that period_contrib might have moved since 
the last
++               * sync, we are only sure that util_sum must be above or equal 
to
++               *    util_avg * minimum possible divider
++               */
++              sa->util_sum = max_t(u32, sa->util_sum, sa->util_avg * 
PELT_MIN_DIVIDER);
+ 
+               r = removed_runnable;
+               sub_positive(&sa->runnable_avg, r);
+@@ -5149,7 +5159,7 @@ static void do_sched_cfs_slack_timer(struct 
cfs_bandwidth *cfs_b)
+ /*
+  * When a group wakes up we want to make sure that its quota is not already
+  * expired/exceeded, otherwise it may be allowed to steal additional ticks of
+- * runtime as update_curr() throttling can not not trigger until it's on-rq.
++ * runtime as update_curr() throttling can not trigger until it's on-rq.
+  */
+ static void check_enqueue_throttle(struct cfs_rq *cfs_rq)
+ {
+diff --git a/kernel/sched/membarrier.c b/kernel/sched/membarrier.c
+index 16f57e71f9c44..cc7cd512e4e33 100644
+--- a/kernel/sched/membarrier.c
++++ b/kernel/sched/membarrier.c
+@@ -19,11 +19,11 @@
+ #endif
+ 
+ #ifdef CONFIG_RSEQ
+-#define MEMBARRIER_CMD_PRIVATE_EXPEDITED_RSEQ_BITMASK         \
++#define MEMBARRIER_PRIVATE_EXPEDITED_RSEQ_BITMASK             \
+       (MEMBARRIER_CMD_PRIVATE_EXPEDITED_RSEQ                  \
+-      | MEMBARRIER_CMD_REGISTER_PRIVATE_EXPEDITED_RSEQ_BITMASK)
++      | MEMBARRIER_CMD_REGISTER_PRIVATE_EXPEDITED_RSEQ)
+ #else
+-#define MEMBARRIER_CMD_PRIVATE_EXPEDITED_RSEQ_BITMASK 0
++#define MEMBARRIER_PRIVATE_EXPEDITED_RSEQ_BITMASK     0
+ #endif
+ 
+ #define MEMBARRIER_CMD_BITMASK                                                
\
+@@ -31,7 +31,8 @@
+       | MEMBARRIER_CMD_REGISTER_GLOBAL_EXPEDITED                      \
+       | MEMBARRIER_CMD_PRIVATE_EXPEDITED                              \
+       | MEMBARRIER_CMD_REGISTER_PRIVATE_EXPEDITED                     \
+-      | MEMBARRIER_PRIVATE_EXPEDITED_SYNC_CORE_BITMASK)
++      | MEMBARRIER_PRIVATE_EXPEDITED_SYNC_CORE_BITMASK                \
++      | MEMBARRIER_PRIVATE_EXPEDITED_RSEQ_BITMASK)
+ 
+ static void ipi_mb(void *info)
+ {
+@@ -315,7 +316,7 @@ static int sync_runqueues_membarrier_state(struct 
mm_struct *mm)
+ 
+       /*
+        * For each cpu runqueue, if the task's mm match @mm, ensure that all
+-       * @mm's membarrier state set bits are also set in in the runqueue's
++       * @mm's membarrier state set bits are also set in the runqueue's
+        * membarrier state. This ensures that a runqueue scheduling
+        * between threads which are users of @mm has its membarrier state
+        * updated.
+diff --git a/kernel/sched/pelt.h b/kernel/sched/pelt.h
+index 0b9aeebb9c325..45bf08e22207c 100644
+--- a/kernel/sched/pelt.h
++++ b/kernel/sched/pelt.h
+@@ -37,9 +37,11 @@ update_irq_load_avg(struct rq *rq, u64 running)
+ }
+ #endif
+ 
++#define PELT_MIN_DIVIDER      (LOAD_AVG_MAX - 1024)
++
+ static inline u32 get_pelt_divider(struct sched_avg *avg)
+ {
+-      return LOAD_AVG_MAX - 1024 + avg->period_contrib;
++      return PELT_MIN_DIVIDER + avg->period_contrib;
+ }
+ 
+ static inline void cfs_se_util_change(struct sched_avg *avg)
+diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
+index cd2d094b9f820..a0729213f37be 100644
+--- a/kernel/trace/trace.c
++++ b/kernel/trace/trace.c
+@@ -7257,7 +7257,8 @@ static struct tracing_log_err 
*get_tracing_log_err(struct trace_array *tr)
+               err = kzalloc(sizeof(*err), GFP_KERNEL);
+               if (!err)
+                       err = ERR_PTR(-ENOMEM);
+-              tr->n_err_log_entries++;
++              else
++                      tr->n_err_log_entries++;
+ 
+               return err;
+       }
+diff --git a/kernel/trace/trace_events_hist.c 
b/kernel/trace/trace_events_hist.c
+index 003e5f37861e3..1557a20b6500e 100644
+--- a/kernel/trace/trace_events_hist.c
++++ b/kernel/trace/trace_events_hist.c
+@@ -3506,6 +3506,7 @@ static int trace_action_create(struct hist_trigger_data 
*hist_data,
+ 
+                       var_ref_idx = find_var_ref_idx(hist_data, var_ref);
+                       if (WARN_ON(var_ref_idx < 0)) {
++                              kfree(p);
+                               ret = var_ref_idx;
+                               goto err;
+                       }
+diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c
+index 7ffcca9ae82a1..72b4127360c7f 100644
+--- a/net/bluetooth/hci_event.c
++++ b/net/bluetooth/hci_event.c
+@@ -5661,6 +5661,11 @@ static void hci_le_adv_report_evt(struct hci_dev *hdev, 
struct sk_buff *skb)
+               struct hci_ev_le_advertising_info *ev = ptr;
+               s8 rssi;
+ 
++              if (ptr > (void *)skb_tail_pointer(skb) - sizeof(*ev)) {
++                      bt_dev_err(hdev, "Malicious advertising data.");
++                      break;
++              }
++
+               if (ev->length <= HCI_MAX_AD_LENGTH &&
+                   ev->data + ev->length <= skb_tail_pointer(skb)) {
+                       rssi = ev->data[ev->length];
+@@ -5672,11 +5677,6 @@ static void hci_le_adv_report_evt(struct hci_dev *hdev, 
struct sk_buff *skb)
+               }
+ 
+               ptr += sizeof(*ev) + ev->length + 1;
+-
+-              if (ptr > (void *) skb_tail_pointer(skb) - sizeof(*ev)) {
+-                      bt_dev_err(hdev, "Malicious advertising data. Stopping 
processing");
+-                      break;
+-              }
+       }
+ 
+       hci_dev_unlock(hdev);
+diff --git a/net/bridge/br_vlan.c b/net/bridge/br_vlan.c
+index 08c77418c687b..852f4b54e8811 100644
+--- a/net/bridge/br_vlan.c
++++ b/net/bridge/br_vlan.c
+@@ -543,10 +543,10 @@ static bool __allowed_ingress(const struct net_bridge 
*br,
+               if (!br_opt_get(br, BROPT_VLAN_STATS_ENABLED)) {
+                       if (*state == BR_STATE_FORWARDING) {
+                               *state = br_vlan_get_pvid_state(vg);
+-                              return br_vlan_state_allowed(*state, true);
+-                      } else {
+-                              return true;
++                              if (!br_vlan_state_allowed(*state, true))
++                                      goto drop;
+                       }
++                      return true;
+               }
+       }
+       v = br_vlan_find(vg, *vid);
+@@ -1873,7 +1873,8 @@ static int br_vlan_rtm_dump(struct sk_buff *skb, struct 
netlink_callback *cb)
+                       goto out_err;
+               }
+               err = br_vlan_dump_dev(dev, skb, cb, dump_flags);
+-              if (err && err != -EMSGSIZE)
++              /* if the dump completed without an error we return 0 here */
++              if (err != -EMSGSIZE)
+                       goto out_err;
+       } else {
+               for_each_netdev_rcu(net, dev) {
+diff --git a/net/core/net-procfs.c b/net/core/net-procfs.c
+index c714e6a9dad4c..eadb696360b48 100644
+--- a/net/core/net-procfs.c
++++ b/net/core/net-procfs.c
+@@ -193,12 +193,23 @@ static const struct seq_operations softnet_seq_ops = {
+       .show  = softnet_seq_show,
+ };
+ 
+-static void *ptype_get_idx(loff_t pos)
++static void *ptype_get_idx(struct seq_file *seq, loff_t pos)
+ {
++      struct list_head *ptype_list = NULL;
+       struct packet_type *pt = NULL;
++      struct net_device *dev;
+       loff_t i = 0;
+       int t;
+ 
++      for_each_netdev_rcu(seq_file_net(seq), dev) {
++              ptype_list = &dev->ptype_all;
++              list_for_each_entry_rcu(pt, ptype_list, list) {
++                      if (i == pos)
++                              return pt;
++                      ++i;
++              }
++      }
++
+       list_for_each_entry_rcu(pt, &ptype_all, list) {
+               if (i == pos)
+                       return pt;
+@@ -219,22 +230,40 @@ static void *ptype_seq_start(struct seq_file *seq, 
loff_t *pos)
+       __acquires(RCU)
+ {
+       rcu_read_lock();
+-      return *pos ? ptype_get_idx(*pos - 1) : SEQ_START_TOKEN;
++      return *pos ? ptype_get_idx(seq, *pos - 1) : SEQ_START_TOKEN;
+ }
+ 
+ static void *ptype_seq_next(struct seq_file *seq, void *v, loff_t *pos)
+ {
++      struct net_device *dev;
+       struct packet_type *pt;
+       struct list_head *nxt;
+       int hash;
+ 
+       ++*pos;
+       if (v == SEQ_START_TOKEN)
+-              return ptype_get_idx(0);
++              return ptype_get_idx(seq, 0);
+ 
+       pt = v;
+       nxt = pt->list.next;
++      if (pt->dev) {
++              if (nxt != &pt->dev->ptype_all)
++                      goto found;
++
++              dev = pt->dev;
++              for_each_netdev_continue_rcu(seq_file_net(seq), dev) {
++                      if (!list_empty(&dev->ptype_all)) {
++                              nxt = dev->ptype_all.next;
++                              goto found;
++                      }
++              }
++
++              nxt = ptype_all.next;
++              goto ptype_all;
++      }
++
+       if (pt->type == htons(ETH_P_ALL)) {
++ptype_all:
+               if (nxt != &ptype_all)
+                       goto found;
+               hash = 0;
+@@ -263,7 +292,8 @@ static int ptype_seq_show(struct seq_file *seq, void *v)
+ 
+       if (v == SEQ_START_TOKEN)
+               seq_puts(seq, "Type Device      Function\n");
+-      else if (pt->dev == NULL || dev_net(pt->dev) == seq_file_net(seq)) {
++      else if ((!pt->af_packet_net || net_eq(pt->af_packet_net, 
seq_file_net(seq))) &&
++               (!pt->dev || net_eq(dev_net(pt->dev), seq_file_net(seq)))) {
+               if (pt->type == htons(ETH_P_ALL))
+                       seq_puts(seq, "ALL ");
+               else
+diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c
+index 10d4cde31c6bf..5e48b3d3a00db 100644
+--- a/net/ipv4/ip_output.c
++++ b/net/ipv4/ip_output.c
+@@ -162,12 +162,19 @@ int ip_build_and_send_pkt(struct sk_buff *skb, const 
struct sock *sk,
+       iph->daddr    = (opt && opt->opt.srr ? opt->opt.faddr : daddr);
+       iph->saddr    = saddr;
+       iph->protocol = sk->sk_protocol;
+-      if (ip_dont_fragment(sk, &rt->dst)) {
++      /* Do not bother generating IPID for small packets (eg SYNACK) */
++      if (skb->len <= IPV4_MIN_MTU || ip_dont_fragment(sk, &rt->dst)) {
+               iph->frag_off = htons(IP_DF);
+               iph->id = 0;
+       } else {
+               iph->frag_off = 0;
+-              __ip_select_ident(net, iph, 1);
++              /* TCP packets here are SYNACK with fat IPv4/TCP options.
++               * Avoid using the hashed IP ident generator.
++               */
++              if (sk->sk_protocol == IPPROTO_TCP)
++                      iph->id = (__force __be16)prandom_u32();
++              else
++                      __ip_select_ident(net, iph, 1);
+       }
+ 
+       if (opt && opt->opt.optlen) {
+@@ -614,18 +621,6 @@ void ip_fraglist_init(struct sk_buff *skb, struct iphdr 
*iph,
+ }
+ EXPORT_SYMBOL(ip_fraglist_init);
+ 
+-static void ip_fraglist_ipcb_prepare(struct sk_buff *skb,
+-                                   struct ip_fraglist_iter *iter)
+-{
+-      struct sk_buff *to = iter->frag;
+-
+-      /* Copy the flags to each fragment. */
+-      IPCB(to)->flags = IPCB(skb)->flags;
+-
+-      if (iter->offset == 0)
+-              ip_options_fragment(to);
+-}
+-
+ void ip_fraglist_prepare(struct sk_buff *skb, struct ip_fraglist_iter *iter)
+ {
+       unsigned int hlen = iter->hlen;
+@@ -671,7 +666,7 @@ void ip_frag_init(struct sk_buff *skb, unsigned int hlen,
+ EXPORT_SYMBOL(ip_frag_init);
+ 
+ static void ip_frag_ipcb(struct sk_buff *from, struct sk_buff *to,
+-                       bool first_frag, struct ip_frag_state *state)
++                       bool first_frag)
+ {
+       /* Copy the flags to each fragment. */
+       IPCB(to)->flags = IPCB(from)->flags;
+@@ -850,8 +845,20 @@ int ip_do_fragment(struct net *net, struct sock *sk, 
struct sk_buff *skb,
+                       /* Prepare header of the next frame,
+                        * before previous one went down. */
+                       if (iter.frag) {
+-                              ip_fraglist_ipcb_prepare(skb, &iter);
++                              bool first_frag = (iter.offset == 0);
++
++                              IPCB(iter.frag)->flags = IPCB(skb)->flags;
+                               ip_fraglist_prepare(skb, &iter);
++                              if (first_frag && IPCB(skb)->opt.optlen) {
++                                      /* ipcb->opt is not populated for frags
++                                       * coming from __ip_make_skb(),
++                                       * ip_options_fragment() needs optlen
++                                       */
++                                      IPCB(iter.frag)->opt.optlen =
++                                              IPCB(skb)->opt.optlen;
++                                      ip_options_fragment(iter.frag);
++                                      ip_send_check(iter.iph);
++                              }
+                       }
+ 
+                       skb->tstamp = tstamp;
+@@ -905,7 +912,7 @@ slow_path:
+                       err = PTR_ERR(skb2);
+                       goto fail;
+               }
+-              ip_frag_ipcb(skb, skb2, first_frag, &state);
++              ip_frag_ipcb(skb, skb2, first_frag);
+ 
+               /*
+                *      Put this fragment into the sending queue.
+diff --git a/net/ipv4/ping.c b/net/ipv4/ping.c
+index 8ce8b7300b9d3..a5722905456c2 100644
+--- a/net/ipv4/ping.c
++++ b/net/ipv4/ping.c
+@@ -220,7 +220,8 @@ static struct sock *ping_lookup(struct net *net, struct 
sk_buff *skb, u16 ident)
+                       continue;
+               }
+ 
+-              if (sk->sk_bound_dev_if && sk->sk_bound_dev_if != dif)
++              if (sk->sk_bound_dev_if && sk->sk_bound_dev_if != dif &&
++                  sk->sk_bound_dev_if != inet_sdif(skb))
+                       continue;
+ 
+               sock_hold(sk);
+diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c
+index 7d26e0f8bdaeb..5d95f80314f95 100644
+--- a/net/ipv4/raw.c
++++ b/net/ipv4/raw.c
+@@ -721,6 +721,7 @@ static int raw_bind(struct sock *sk, struct sockaddr 
*uaddr, int addr_len)
+       int ret = -EINVAL;
+       int chk_addr_ret;
+ 
++      lock_sock(sk);
+       if (sk->sk_state != TCP_CLOSE || addr_len < sizeof(struct sockaddr_in))
+               goto out;
+ 
+@@ -740,7 +741,9 @@ static int raw_bind(struct sock *sk, struct sockaddr 
*uaddr, int addr_len)
+               inet->inet_saddr = 0;  /* Use device */
+       sk_dst_reset(sk);
+       ret = 0;
+-out:  return ret;
++out:
++      release_sock(sk);
++      return ret;
+ }
+ 
+ /*
+diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
+index 29526937077b3..4dde49e628fab 100644
+--- a/net/ipv6/addrconf.c
++++ b/net/ipv6/addrconf.c
+@@ -2577,7 +2577,7 @@ int addrconf_prefix_rcv_add_addr(struct net *net, struct 
net_device *dev,
+                                __u32 valid_lft, u32 prefered_lft)
+ {
+       struct inet6_ifaddr *ifp = ipv6_get_ifaddr(net, addr, dev, 1);
+-      int create = 0;
++      int create = 0, update_lft = 0;
+ 
+       if (!ifp && valid_lft) {
+               int max_addresses = in6_dev->cnf.max_addresses;
+@@ -2621,19 +2621,32 @@ int addrconf_prefix_rcv_add_addr(struct net *net, 
struct net_device *dev,
+               unsigned long now;
+               u32 stored_lft;
+ 
+-              /* Update lifetime (RFC4862 5.5.3 e)
+-               * We deviate from RFC4862 by honoring all Valid Lifetimes to
+-               * improve the reaction of SLAAC to renumbering events
+-               * (draft-gont-6man-slaac-renum-06, Section 4.2)
+-               */
++              /* update lifetime (RFC2462 5.5.3 e) */
+               spin_lock_bh(&ifp->lock);
+               now = jiffies;
+               if (ifp->valid_lft > (now - ifp->tstamp) / HZ)
+                       stored_lft = ifp->valid_lft - (now - ifp->tstamp) / HZ;
+               else
+                       stored_lft = 0;
+-
+               if (!create && stored_lft) {
++                      const u32 minimum_lft = min_t(u32,
++                              stored_lft, MIN_VALID_LIFETIME);
++                      valid_lft = max(valid_lft, minimum_lft);
++
++                      /* RFC4862 Section 5.5.3e:
++                       * "Note that the preferred lifetime of the
++                       *  corresponding address is always reset to
++                       *  the Preferred Lifetime in the received
++                       *  Prefix Information option, regardless of
++                       *  whether the valid lifetime is also reset or
++                       *  ignored."
++                       *
++                       * So we should always update prefered_lft here.
++                       */
++                      update_lft = 1;
++              }
++
++              if (update_lft) {
+                       ifp->valid_lft = valid_lft;
+                       ifp->prefered_lft = prefered_lft;
+                       ifp->tstamp = now;
+diff --git a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c
+index e43f1fbac28b6..c783b91231321 100644
+--- a/net/ipv6/ip6_fib.c
++++ b/net/ipv6/ip6_fib.c
+@@ -110,7 +110,7 @@ void fib6_update_sernum(struct net *net, struct fib6_info 
*f6i)
+       fn = rcu_dereference_protected(f6i->fib6_node,
+                       lockdep_is_held(&f6i->fib6_table->tb6_lock));
+       if (fn)
+-              fn->fn_sernum = fib6_new_sernum(net);
++              WRITE_ONCE(fn->fn_sernum, fib6_new_sernum(net));
+ }
+ 
+ /*
+@@ -587,12 +587,13 @@ static int fib6_dump_table(struct fib6_table *table, 
struct sk_buff *skb,
+               spin_unlock_bh(&table->tb6_lock);
+               if (res > 0) {
+                       cb->args[4] = 1;
+-                      cb->args[5] = w->root->fn_sernum;
++                      cb->args[5] = READ_ONCE(w->root->fn_sernum);
+               }
+       } else {
+-              if (cb->args[5] != w->root->fn_sernum) {
++              int sernum = READ_ONCE(w->root->fn_sernum);
++              if (cb->args[5] != sernum) {
+                       /* Begin at the root if the tree changed */
+-                      cb->args[5] = w->root->fn_sernum;
++                      cb->args[5] = sernum;
+                       w->state = FWS_INIT;
+                       w->node = w->root;
+                       w->skip = w->count;
+@@ -1342,7 +1343,7 @@ static void __fib6_update_sernum_upto_root(struct 
fib6_info *rt,
+       /* paired with smp_rmb() in rt6_get_cookie_safe() */
+       smp_wmb();
+       while (fn) {
+-              fn->fn_sernum = sernum;
++              WRITE_ONCE(fn->fn_sernum, sernum);
+               fn = rcu_dereference_protected(fn->parent,
+                               lockdep_is_held(&rt->fib6_table->tb6_lock));
+       }
+@@ -2171,8 +2172,8 @@ static int fib6_clean_node(struct fib6_walker *w)
+       };
+ 
+       if (c->sernum != FIB6_NO_SERNUM_CHANGE &&
+-          w->node->fn_sernum != c->sernum)
+-              w->node->fn_sernum = c->sernum;
++          READ_ONCE(w->node->fn_sernum) != c->sernum)
++              WRITE_ONCE(w->node->fn_sernum, c->sernum);
+ 
+       if (!c->func) {
+               WARN_ON_ONCE(c->sernum == FIB6_NO_SERNUM_CHANGE);
+@@ -2536,7 +2537,7 @@ static void ipv6_route_seq_setup_walk(struct 
ipv6_route_iter *iter,
+       iter->w.state = FWS_INIT;
+       iter->w.node = iter->w.root;
+       iter->w.args = iter;
+-      iter->sernum = iter->w.root->fn_sernum;
++      iter->sernum = READ_ONCE(iter->w.root->fn_sernum);
+       INIT_LIST_HEAD(&iter->w.lh);
+       fib6_walker_link(net, &iter->w);
+ }
+@@ -2564,8 +2565,10 @@ static struct fib6_table 
*ipv6_route_seq_next_table(struct fib6_table *tbl,
+ 
+ static void ipv6_route_check_sernum(struct ipv6_route_iter *iter)
+ {
+-      if (iter->sernum != iter->w.root->fn_sernum) {
+-              iter->sernum = iter->w.root->fn_sernum;
++      int sernum = READ_ONCE(iter->w.root->fn_sernum);
++
++      if (iter->sernum != sernum) {
++              iter->sernum = sernum;
+               iter->w.state = FWS_INIT;
+               iter->w.node = iter->w.root;
+               WARN_ON(iter->w.skip);
+diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c
+index 08441f06afd48..3a2741569b847 100644
+--- a/net/ipv6/ip6_tunnel.c
++++ b/net/ipv6/ip6_tunnel.c
+@@ -1066,14 +1066,14 @@ int ip6_tnl_xmit_ctl(struct ip6_tnl *t,
+ 
+               if (unlikely(!ipv6_chk_addr_and_flags(net, laddr, ldev, false,
+                                                     0, IFA_F_TENTATIVE)))
+-                      pr_warn("%s xmit: Local address not yet configured!\n",
+-                              p->name);
++                      pr_warn_ratelimited("%s xmit: Local address not yet 
configured!\n",
++                                          p->name);
+               else if (!(p->flags & IP6_TNL_F_ALLOW_LOCAL_REMOTE) &&
+                        !ipv6_addr_is_multicast(raddr) &&
+                        unlikely(ipv6_chk_addr_and_flags(net, raddr, ldev,
+                                                         true, 0, 
IFA_F_TENTATIVE)))
+-                      pr_warn("%s xmit: Routing loop! Remote address found on 
this node!\n",
+-                              p->name);
++                      pr_warn_ratelimited("%s xmit: Routing loop! Remote 
address found on this node!\n",
++                                          p->name);
+               else
+                       ret = 1;
+               rcu_read_unlock();
+diff --git a/net/ipv6/route.c b/net/ipv6/route.c
+index 654bf4ca61260..352e645c546eb 100644
+--- a/net/ipv6/route.c
++++ b/net/ipv6/route.c
+@@ -2674,7 +2674,7 @@ static void ip6_link_failure(struct sk_buff *skb)
+                       if (from) {
+                               fn = rcu_dereference(from->fib6_node);
+                               if (fn && (rt->rt6i_flags & RTF_DEFAULT))
+-                                      fn->fn_sernum = -1;
++                                      WRITE_ONCE(fn->fn_sernum, -1);
+                       }
+               }
+               rcu_read_unlock();
+diff --git a/net/netfilter/nf_conntrack_core.c 
b/net/netfilter/nf_conntrack_core.c
+index f4cf26b606f92..8369af0c50eab 100644
+--- a/net/netfilter/nf_conntrack_core.c
++++ b/net/netfilter/nf_conntrack_core.c
+@@ -1832,15 +1832,17 @@ repeat:
+               pr_debug("nf_conntrack_in: Can't track with proto module\n");
+               nf_conntrack_put(&ct->ct_general);
+               skb->_nfct = 0;
+-              NF_CT_STAT_INC_ATOMIC(state->net, invalid);
+-              if (ret == -NF_DROP)
+-                      NF_CT_STAT_INC_ATOMIC(state->net, drop);
+               /* Special case: TCP tracker reports an attempt to reopen a
+                * closed/aborted connection. We have to go back and create a
+                * fresh conntrack.
+                */
+               if (ret == -NF_REPEAT)
+                       goto repeat;
++
++              NF_CT_STAT_INC_ATOMIC(state->net, invalid);
++              if (ret == -NF_DROP)
++                      NF_CT_STAT_INC_ATOMIC(state->net, drop);
++
+               ret = -ret;
+               goto out;
+       }
+diff --git a/net/netfilter/nft_payload.c b/net/netfilter/nft_payload.c
+index 1ebee25de6772..6a8495bd08bb2 100644
+--- a/net/netfilter/nft_payload.c
++++ b/net/netfilter/nft_payload.c
+@@ -502,6 +502,9 @@ static int nft_payload_l4csum_offset(const struct 
nft_pktinfo *pkt,
+                                    struct sk_buff *skb,
+                                    unsigned int *l4csum_offset)
+ {
++      if (pkt->xt.fragoff)
++              return -1;
++
+       switch (pkt->tprot) {
+       case IPPROTO_TCP:
+               *l4csum_offset = offsetof(struct tcphdr, check);
+diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
+index f78097aa403a8..6ef035494f30d 100644
+--- a/net/packet/af_packet.c
++++ b/net/packet/af_packet.c
+@@ -1735,6 +1735,7 @@ static int fanout_add(struct sock *sk, struct 
fanout_args *args)
+               match->prot_hook.dev = po->prot_hook.dev;
+               match->prot_hook.func = packet_rcv_fanout;
+               match->prot_hook.af_packet_priv = match;
++              match->prot_hook.af_packet_net = read_pnet(&match->net);
+               match->prot_hook.id_match = match_fanout_group;
+               match->max_num_members = args->max_num_members;
+               list_add(&match->list, &fanout_list);
+@@ -3323,6 +3324,7 @@ static int packet_create(struct net *net, struct socket 
*sock, int protocol,
+               po->prot_hook.func = packet_rcv_spkt;
+ 
+       po->prot_hook.af_packet_priv = sk;
++      po->prot_hook.af_packet_net = sock_net(sk);
+ 
+       if (proto) {
+               po->prot_hook.type = proto;
+diff --git a/net/rxrpc/call_event.c b/net/rxrpc/call_event.c
+index 6be2672a65eab..df864e6922679 100644
+--- a/net/rxrpc/call_event.c
++++ b/net/rxrpc/call_event.c
+@@ -157,7 +157,7 @@ static void rxrpc_congestion_timeout(struct rxrpc_call 
*call)
+ static void rxrpc_resend(struct rxrpc_call *call, unsigned long now_j)
+ {
+       struct sk_buff *skb;
+-      unsigned long resend_at, rto_j;
++      unsigned long resend_at;
+       rxrpc_seq_t cursor, seq, top;
+       ktime_t now, max_age, oldest, ack_ts;
+       int ix;
+@@ -165,10 +165,8 @@ static void rxrpc_resend(struct rxrpc_call *call, 
unsigned long now_j)
+ 
+       _enter("{%d,%d}", call->tx_hard_ack, call->tx_top);
+ 
+-      rto_j = call->peer->rto_j;
+-
+       now = ktime_get_real();
+-      max_age = ktime_sub(now, jiffies_to_usecs(rto_j));
++      max_age = ktime_sub(now, jiffies_to_usecs(call->peer->rto_j));
+ 
+       spin_lock_bh(&call->lock);
+ 
+@@ -213,7 +211,7 @@ static void rxrpc_resend(struct rxrpc_call *call, unsigned 
long now_j)
+       }
+ 
+       resend_at = nsecs_to_jiffies(ktime_to_ns(ktime_sub(now, oldest)));
+-      resend_at += jiffies + rto_j;
++      resend_at += jiffies + rxrpc_get_rto_backoff(call->peer, retrans);
+       WRITE_ONCE(call->resend_at, resend_at);
+ 
+       if (unacked)
+diff --git a/net/rxrpc/output.c b/net/rxrpc/output.c
+index 10f2bf2e9068a..a45c83f22236e 100644
+--- a/net/rxrpc/output.c
++++ b/net/rxrpc/output.c
+@@ -468,7 +468,7 @@ done:
+                       if (call->peer->rtt_count > 1) {
+                               unsigned long nowj = jiffies, ack_lost_at;
+ 
+-                              ack_lost_at = rxrpc_get_rto_backoff(call->peer, 
retrans);
++                              ack_lost_at = rxrpc_get_rto_backoff(call->peer, 
false);
+                               ack_lost_at += nowj;
+                               WRITE_ONCE(call->ack_lost_at, ack_lost_at);
+                               rxrpc_reduce_call_timer(call, ack_lost_at, nowj,
+diff --git a/net/sunrpc/rpc_pipe.c b/net/sunrpc/rpc_pipe.c
+index eadc0ede928c3..5f854ffbab925 100644
+--- a/net/sunrpc/rpc_pipe.c
++++ b/net/sunrpc/rpc_pipe.c
+@@ -599,9 +599,9 @@ static int __rpc_rmdir(struct inode *dir, struct dentry 
*dentry)
+ 
+       dget(dentry);
+       ret = simple_rmdir(dir, dentry);
++      d_drop(dentry);
+       if (!ret)
+               fsnotify_rmdir(dir, dentry);
+-      d_delete(dentry);
+       dput(dentry);
+       return ret;
+ }
+@@ -612,9 +612,9 @@ static int __rpc_unlink(struct inode *dir, struct dentry 
*dentry)
+ 
+       dget(dentry);
+       ret = simple_unlink(dir, dentry);
++      d_drop(dentry);
+       if (!ret)
+               fsnotify_unlink(dir, dentry);
+-      d_delete(dentry);
+       dput(dentry);
+       return ret;
+ }
+diff --git a/usr/include/Makefile b/usr/include/Makefile
+index f6b3c85d900ed..703a255cddc63 100644
+--- a/usr/include/Makefile
++++ b/usr/include/Makefile
+@@ -34,7 +34,6 @@ no-header-test += linux/hdlc/ioctl.h
+ no-header-test += linux/ivtv.h
+ no-header-test += linux/kexec.h
+ no-header-test += linux/matroxfb.h
+-no-header-test += linux/nfc.h
+ no-header-test += linux/omap3isp.h
+ no-header-test += linux/omapfb.h
+ no-header-test += linux/patchkey.h
+diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
+index 4a7d377b3a500..d22de43925076 100644
+--- a/virt/kvm/kvm_main.c
++++ b/virt/kvm/kvm_main.c
+@@ -1691,7 +1691,6 @@ struct kvm_memory_slot *kvm_vcpu_gfn_to_memslot(struct 
kvm_vcpu *vcpu, gfn_t gfn
+ {
+       return __gfn_to_memslot(kvm_vcpu_memslots(vcpu), gfn);
+ }
+-EXPORT_SYMBOL_GPL(kvm_vcpu_gfn_to_memslot);
+ 
+ bool kvm_is_visible_gfn(struct kvm *kvm, gfn_t gfn)
+ {

Reply via email to