commit:     e733b0241620ba620ba58ee2a86aebd811b73cc1
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Mon Sep  5 12:03:56 2022 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Mon Sep  5 12:03:56 2022 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=e733b024

Linux patch 5.10.141

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

 0000_README               |    4 +
 1140_linux-5.10.141.patch | 1181 +++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 1185 insertions(+)

diff --git a/0000_README b/0000_README
index f5306e89..1da294a6 100644
--- a/0000_README
+++ b/0000_README
@@ -603,6 +603,10 @@ Patch:  1139_linux-5.10.140.patch
 From:   http://www.kernel.org
 Desc:   Linux 5.10.140
 
+Patch:  1140_linux-5.10.141.patch
+From:   http://www.kernel.org
+Desc:   Linux 5.10.141
+
 Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.

diff --git a/1140_linux-5.10.141.patch b/1140_linux-5.10.141.patch
new file mode 100644
index 00000000..c5adbeb2
--- /dev/null
+++ b/1140_linux-5.10.141.patch
@@ -0,0 +1,1181 @@
+diff --git a/Makefile b/Makefile
+index a80179d2c0057..d2833d29d65f5 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 5
+ PATCHLEVEL = 10
+-SUBLEVEL = 140
++SUBLEVEL = 141
+ EXTRAVERSION =
+ NAME = Dare mighty things
+ 
+diff --git a/arch/s390/hypfs/hypfs_diag.c b/arch/s390/hypfs/hypfs_diag.c
+index f0bc4dc3e9bf0..6511d15ace45e 100644
+--- a/arch/s390/hypfs/hypfs_diag.c
++++ b/arch/s390/hypfs/hypfs_diag.c
+@@ -437,7 +437,7 @@ __init int hypfs_diag_init(void)
+       int rc;
+ 
+       if (diag204_probe()) {
+-              pr_err("The hardware system does not support hypfs\n");
++              pr_info("The hardware system does not support hypfs\n");
+               return -ENODATA;
+       }
+ 
+diff --git a/arch/s390/hypfs/inode.c b/arch/s390/hypfs/inode.c
+index 5c97f48cea91d..ee919bfc81867 100644
+--- a/arch/s390/hypfs/inode.c
++++ b/arch/s390/hypfs/inode.c
+@@ -496,9 +496,9 @@ fail_hypfs_sprp_exit:
+       hypfs_vm_exit();
+ fail_hypfs_diag_exit:
+       hypfs_diag_exit();
++      pr_err("Initialization of hypfs failed with rc=%i\n", rc);
+ fail_dbfs_exit:
+       hypfs_dbfs_exit();
+-      pr_err("Initialization of hypfs failed with rc=%i\n", rc);
+       return rc;
+ }
+ device_initcall(hypfs_init)
+diff --git a/arch/s390/mm/fault.c b/arch/s390/mm/fault.c
+index ed517fad0d035..1866374356c84 100644
+--- a/arch/s390/mm/fault.c
++++ b/arch/s390/mm/fault.c
+@@ -429,7 +429,9 @@ static inline vm_fault_t do_exception(struct pt_regs *regs, int access)
+       flags = FAULT_FLAG_DEFAULT;
+       if (user_mode(regs))
+               flags |= FAULT_FLAG_USER;
+-      if (access == VM_WRITE || (trans_exc_code & store_indication) == 0x400)
++      if ((trans_exc_code & store_indication) == 0x400)
++              access = VM_WRITE;
++      if (access == VM_WRITE)
+               flags |= FAULT_FLAG_WRITE;
+       mmap_read_lock(mm);
+ 
+diff --git a/arch/x86/include/asm/nospec-branch.h b/arch/x86/include/asm/nospec-branch.h
+index 0acd99329923c..07f5030073bbc 100644
+--- a/arch/x86/include/asm/nospec-branch.h
++++ b/arch/x86/include/asm/nospec-branch.h
+@@ -35,33 +35,56 @@
+ #define RSB_CLEAR_LOOPS               32      /* To forcibly overwrite all entries */
+ 
+ /*
++ * Common helper for __FILL_RETURN_BUFFER and __FILL_ONE_RETURN.
++ */
++#define __FILL_RETURN_SLOT                    \
++      ANNOTATE_INTRA_FUNCTION_CALL;           \
++      call    772f;                           \
++      int3;                                   \
++772:
++
++/*
++ * Stuff the entire RSB.
++ *
+  * Google experimented with loop-unrolling and this turned out to be
+  * the optimal version — two calls, each with their own speculation
+  * trap should their return address end up getting used, in a loop.
+  */
+-#define __FILL_RETURN_BUFFER(reg, nr, sp)     \
+-      mov     $(nr/2), reg;                   \
+-771:                                          \
+-      ANNOTATE_INTRA_FUNCTION_CALL;           \
+-      call    772f;                           \
+-773:  /* speculation trap */                  \
+-      UNWIND_HINT_EMPTY;                      \
+-      pause;                                  \
+-      lfence;                                 \
+-      jmp     773b;                           \
+-772:                                          \
+-      ANNOTATE_INTRA_FUNCTION_CALL;           \
+-      call    774f;                           \
+-775:  /* speculation trap */                  \
+-      UNWIND_HINT_EMPTY;                      \
+-      pause;                                  \
+-      lfence;                                 \
+-      jmp     775b;                           \
+-774:                                          \
+-      add     $(BITS_PER_LONG/8) * 2, sp;     \
+-      dec     reg;                            \
+-      jnz     771b;                           \
+-      /* barrier for jnz misprediction */     \
++#ifdef CONFIG_X86_64
++#define __FILL_RETURN_BUFFER(reg, nr)                 \
++      mov     $(nr/2), reg;                           \
++771:                                                  \
++      __FILL_RETURN_SLOT                              \
++      __FILL_RETURN_SLOT                              \
++      add     $(BITS_PER_LONG/8) * 2, %_ASM_SP;       \
++      dec     reg;                                    \
++      jnz     771b;                                   \
++      /* barrier for jnz misprediction */             \
++      lfence;
++#else
++/*
++ * i386 doesn't unconditionally have LFENCE, as such it can't
++ * do a loop.
++ */
++#define __FILL_RETURN_BUFFER(reg, nr)                 \
++      .rept nr;                                       \
++      __FILL_RETURN_SLOT;                             \
++      .endr;                                          \
++      add     $(BITS_PER_LONG/8) * nr, %_ASM_SP;
++#endif
++
++/*
++ * Stuff a single RSB slot.
++ *
++ * To mitigate Post-Barrier RSB speculation, one CALL instruction must be
++ * forced to retire before letting a RET instruction execute.
++ *
++ * On PBRSB-vulnerable CPUs, it is not safe for a RET to be executed
++ * before this point.
++ */
++#define __FILL_ONE_RETURN                             \
++      __FILL_RETURN_SLOT                              \
++      add     $(BITS_PER_LONG/8), %_ASM_SP;           \
+       lfence;
+ 
+ #ifdef __ASSEMBLY__
+@@ -120,28 +143,15 @@
+ #endif
+ .endm
+ 
+-.macro ISSUE_UNBALANCED_RET_GUARD
+-      ANNOTATE_INTRA_FUNCTION_CALL
+-      call .Lunbalanced_ret_guard_\@
+-      int3
+-.Lunbalanced_ret_guard_\@:
+-      add $(BITS_PER_LONG/8), %_ASM_SP
+-      lfence
+-.endm
+-
+  /*
+   * A simpler FILL_RETURN_BUFFER macro. Don't make people use the CPP
+   * monstrosity above, manually.
+   */
+-.macro FILL_RETURN_BUFFER reg:req nr:req ftr:req ftr2
+-.ifb \ftr2
+-      ALTERNATIVE "jmp .Lskip_rsb_\@", "", \ftr
+-.else
+-      ALTERNATIVE_2 "jmp .Lskip_rsb_\@", "", \ftr, "jmp .Lunbalanced_\@", \ftr2
+-.endif
+-      __FILL_RETURN_BUFFER(\reg,\nr,%_ASM_SP)
+-.Lunbalanced_\@:
+-      ISSUE_UNBALANCED_RET_GUARD
++.macro FILL_RETURN_BUFFER reg:req nr:req ftr:req ftr2=ALT_NOT(X86_FEATURE_ALWAYS)
++      ALTERNATIVE_2 "jmp .Lskip_rsb_\@", \
++              __stringify(__FILL_RETURN_BUFFER(\reg,\nr)), \ftr, \
++              __stringify(__FILL_ONE_RETURN), \ftr2
++
+ .Lskip_rsb_\@:
+ .endm
+ 
+diff --git a/drivers/android/binder.c b/drivers/android/binder.c
+index 366b124057081..a5d5247c4f3e8 100644
+--- a/drivers/android/binder.c
++++ b/drivers/android/binder.c
+@@ -6069,6 +6069,7 @@ const struct file_operations binder_fops = {
+       .open = binder_open,
+       .flush = binder_flush,
+       .release = binder_release,
++      .may_pollfree = true,
+ };
+ 
+ static int __init init_binder_device(const char *name)
+diff --git a/drivers/dma-buf/udmabuf.c b/drivers/dma-buf/udmabuf.c
+index 2e3b76519b49d..b624f3d8f0e64 100644
+--- a/drivers/dma-buf/udmabuf.c
++++ b/drivers/dma-buf/udmabuf.c
+@@ -327,7 +327,23 @@ static struct miscdevice udmabuf_misc = {
+ 
+ static int __init udmabuf_dev_init(void)
+ {
+-      return misc_register(&udmabuf_misc);
++      int ret;
++
++      ret = misc_register(&udmabuf_misc);
++      if (ret < 0) {
++              pr_err("Could not initialize udmabuf device\n");
++              return ret;
++      }
++
++      ret = dma_coerce_mask_and_coherent(udmabuf_misc.this_device,
++                                         DMA_BIT_MASK(64));
++      if (ret < 0) {
++              pr_err("Could not setup DMA mask for udmabuf device\n");
++              misc_deregister(&udmabuf_misc);
++              return ret;
++      }
++
++      return 0;
+ }
+ 
+ static void __exit udmabuf_dev_exit(void)
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+index d949d6c52f24b..ff5555353eb4f 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+@@ -283,7 +283,7 @@ enum amdgpu_kiq_irq {
+       AMDGPU_CP_KIQ_IRQ_DRIVER0 = 0,
+       AMDGPU_CP_KIQ_IRQ_LAST
+ };
+-
++#define SRIOV_USEC_TIMEOUT  1200000 /* wait 12 * 100ms for SRIOV */
+ #define MAX_KIQ_REG_WAIT       5000 /* in usecs, 5ms */
+ #define MAX_KIQ_REG_BAILOUT_INTERVAL   5 /* in msecs, 5ms */
+ #define MAX_KIQ_REG_TRY 80 /* 20 -> 80 */
+diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c
+index 150fa5258fb6f..2aa9242c58ab9 100644
+--- a/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c
+@@ -371,6 +371,7 @@ static int gmc_v10_0_flush_gpu_tlb_pasid(struct amdgpu_device *adev,
+       uint32_t seq;
+       uint16_t queried_pasid;
+       bool ret;
++      u32 usec_timeout = amdgpu_sriov_vf(adev) ? SRIOV_USEC_TIMEOUT : adev->usec_timeout;
+       struct amdgpu_ring *ring = &adev->gfx.kiq.ring;
+       struct amdgpu_kiq *kiq = &adev->gfx.kiq;
+ 
+@@ -389,7 +390,7 @@ static int gmc_v10_0_flush_gpu_tlb_pasid(struct amdgpu_device *adev,
+ 
+               amdgpu_ring_commit(ring);
+               spin_unlock(&adev->gfx.kiq.ring_lock);
+-              r = amdgpu_fence_wait_polling(ring, seq, adev->usec_timeout);
++              r = amdgpu_fence_wait_polling(ring, seq, usec_timeout);
+               if (r < 1) {
+                       dev_err(adev->dev, "wait for kiq fence error: %ld.\n", r);
+                       return -ETIME;
+diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
+index 3a864041968f6..1673bf3bae55a 100644
+--- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
+@@ -839,6 +839,7 @@ static int gmc_v9_0_flush_gpu_tlb_pasid(struct amdgpu_device *adev,
+       uint32_t seq;
+       uint16_t queried_pasid;
+       bool ret;
++      u32 usec_timeout = amdgpu_sriov_vf(adev) ? SRIOV_USEC_TIMEOUT : adev->usec_timeout;
+       struct amdgpu_ring *ring = &adev->gfx.kiq.ring;
+       struct amdgpu_kiq *kiq = &adev->gfx.kiq;
+ 
+@@ -878,7 +879,7 @@ static int gmc_v9_0_flush_gpu_tlb_pasid(struct amdgpu_device *adev,
+ 
+               amdgpu_ring_commit(ring);
+               spin_unlock(&adev->gfx.kiq.ring_lock);
+-              r = amdgpu_fence_wait_polling(ring, seq, adev->usec_timeout);
++              r = amdgpu_fence_wait_polling(ring, seq, usec_timeout);
+               if (r < 1) {
+                       dev_err(adev->dev, "wait for kiq fence error: %ld.\n", r);
+                       up_read(&adev->reset_sem);
+diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c b/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c
+index bae3a146b2cc2..89cc852cb27c5 100644
+--- a/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c
++++ b/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c
+@@ -546,9 +546,11 @@ static void dce112_get_pix_clk_dividers_helper (
+               switch (pix_clk_params->color_depth) {
+               case COLOR_DEPTH_101010:
+                       actual_pixel_clock_100hz = (actual_pixel_clock_100hz * 5) >> 2;
++                      actual_pixel_clock_100hz -= actual_pixel_clock_100hz % 10;
+                       break;
+               case COLOR_DEPTH_121212:
+                       actual_pixel_clock_100hz = (actual_pixel_clock_100hz * 6) >> 2;
++                      actual_pixel_clock_100hz -= actual_pixel_clock_100hz % 10;
+                       break;
+               case COLOR_DEPTH_161616:
+                       actual_pixel_clock_100hz = actual_pixel_clock_100hz * 2;
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_mpc.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_mpc.c
+index 3fcd408e91032..855682590c1bb 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_mpc.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_mpc.c
+@@ -125,6 +125,12 @@ struct mpcc *mpc1_get_mpcc_for_dpp(struct mpc_tree *tree, int dpp_id)
+       while (tmp_mpcc != NULL) {
+               if (tmp_mpcc->dpp_id == dpp_id)
+                       return tmp_mpcc;
++
++              /* avoid circular linked list */
++              ASSERT(tmp_mpcc != tmp_mpcc->mpcc_bot);
++              if (tmp_mpcc == tmp_mpcc->mpcc_bot)
++                      break;
++
+               tmp_mpcc = tmp_mpcc->mpcc_bot;
+       }
+       return NULL;
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c
+index 800be2693faca..963d72f96dca3 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c
+@@ -464,6 +464,11 @@ void optc1_enable_optc_clock(struct timing_generator *optc, bool enable)
+                               OTG_CLOCK_ON, 1,
+                               1, 1000);
+       } else  {
++
++              //last chance to clear underflow, otherwise, it will always there due to clock is off.
++              if (optc->funcs->is_optc_underflow_occurred(optc) == true)
++                      optc->funcs->clear_optc_underflow(optc);
++
+               REG_UPDATE_2(OTG_CLOCK_CONTROL,
+                               OTG_CLOCK_GATE_DIS, 0,
+                               OTG_CLOCK_EN, 0);
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_mpc.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_mpc.c
+index 99cc095dc33c7..a701ea56c0aa0 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_mpc.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_mpc.c
+@@ -533,6 +533,12 @@ struct mpcc *mpc2_get_mpcc_for_dpp(struct mpc_tree *tree, int dpp_id)
+       while (tmp_mpcc != NULL) {
+               if (tmp_mpcc->dpp_id == 0xf || tmp_mpcc->dpp_id == dpp_id)
+                       return tmp_mpcc;
++
++              /* avoid circular linked list */
++              ASSERT(tmp_mpcc != tmp_mpcc->mpcc_bot);
++              if (tmp_mpcc == tmp_mpcc->mpcc_bot)
++                      break;
++
+               tmp_mpcc = tmp_mpcc->mpcc_bot;
+       }
+       return NULL;
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hubp.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hubp.c
+index af462fe4260de..b0fd8859bd2f2 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hubp.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hubp.c
+@@ -86,7 +86,7 @@ bool hubp3_program_surface_flip_and_addr(
+                       VMID, address->vmid);
+ 
+       if (address->type == PLN_ADDR_TYPE_GRPH_STEREO) {
+-              REG_UPDATE(DCSURF_FLIP_CONTROL, SURFACE_FLIP_MODE_FOR_STEREOSYNC, 0x1);
++              REG_UPDATE(DCSURF_FLIP_CONTROL, SURFACE_FLIP_MODE_FOR_STEREOSYNC, 0);
+               REG_UPDATE(DCSURF_FLIP_CONTROL, SURFACE_FLIP_IN_STEREOSYNC, 0x1);
+ 
+       } else {
+diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
+index 8556c229ff598..49d7fa1d08427 100644
+--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
++++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
+@@ -2759,6 +2759,7 @@ static const struct pptable_funcs sienna_cichlid_ppt_funcs = {
+       .dump_pptable = sienna_cichlid_dump_pptable,
+       .init_microcode = smu_v11_0_init_microcode,
+       .load_microcode = smu_v11_0_load_microcode,
++      .fini_microcode = smu_v11_0_fini_microcode,
+       .init_smc_tables = sienna_cichlid_init_smc_tables,
+       .fini_smc_tables = smu_v11_0_fini_smc_tables,
+       .init_power = smu_v11_0_init_power,
+diff --git a/drivers/hid/hid-steam.c b/drivers/hid/hid-steam.c
+index a3b151b29bd71..fc616db4231bb 100644
+--- a/drivers/hid/hid-steam.c
++++ b/drivers/hid/hid-steam.c
+@@ -134,6 +134,11 @@ static int steam_recv_report(struct steam_device *steam,
+       int ret;
+ 
+       r = steam->hdev->report_enum[HID_FEATURE_REPORT].report_id_hash[0];
++      if (!r) {
++              hid_err(steam->hdev, "No HID_FEATURE_REPORT submitted -  nothing to read\n");
++              return -EINVAL;
++      }
++
+       if (hid_report_len(r) < 64)
+               return -EINVAL;
+ 
+@@ -165,6 +170,11 @@ static int steam_send_report(struct steam_device *steam,
+       int ret;
+ 
+       r = steam->hdev->report_enum[HID_FEATURE_REPORT].report_id_hash[0];
++      if (!r) {
++              hid_err(steam->hdev, "No HID_FEATURE_REPORT submitted -  nothing to read\n");
++              return -EINVAL;
++      }
++
+       if (hid_report_len(r) < 64)
+               return -EINVAL;
+ 
+diff --git a/drivers/hid/hidraw.c b/drivers/hid/hidraw.c
+index 2eee5e31c2b7e..fade7fcf6a146 100644
+--- a/drivers/hid/hidraw.c
++++ b/drivers/hid/hidraw.c
+@@ -346,10 +346,13 @@ static int hidraw_release(struct inode * inode, struct file * file)
+       unsigned int minor = iminor(inode);
+       struct hidraw_list *list = file->private_data;
+       unsigned long flags;
++      int i;
+ 
+       mutex_lock(&minors_lock);
+ 
+       spin_lock_irqsave(&hidraw_table[minor]->list_lock, flags);
++      for (i = list->tail; i < list->head; i++)
++              kfree(list->buffer[i].value);
+       list_del(&list->node);
+       spin_unlock_irqrestore(&hidraw_table[minor]->list_lock, flags);
+       kfree(list);
+diff --git a/drivers/media/usb/pvrusb2/pvrusb2-hdw.c b/drivers/media/usb/pvrusb2/pvrusb2-hdw.c
+index fccd1798445d5..d22ce328a2797 100644
+--- a/drivers/media/usb/pvrusb2/pvrusb2-hdw.c
++++ b/drivers/media/usb/pvrusb2/pvrusb2-hdw.c
+@@ -2610,6 +2610,7 @@ struct pvr2_hdw *pvr2_hdw_create(struct usb_interface *intf,
+               del_timer_sync(&hdw->encoder_run_timer);
+               del_timer_sync(&hdw->encoder_wait_timer);
+               flush_work(&hdw->workpoll);
++              v4l2_device_unregister(&hdw->v4l2_dev);
+               usb_free_urb(hdw->ctl_read_urb);
+               usb_free_urb(hdw->ctl_write_urb);
+               kfree(hdw->ctl_read_buffer);
+diff --git a/drivers/mmc/host/mtk-sd.c b/drivers/mmc/host/mtk-sd.c
+index f5c965da95013..d71c113f428f6 100644
+--- a/drivers/mmc/host/mtk-sd.c
++++ b/drivers/mmc/host/mtk-sd.c
+@@ -2293,6 +2293,9 @@ static void msdc_cqe_disable(struct mmc_host *mmc, bool recovery)
+       /* disable busy check */
+       sdr_clr_bits(host->base + MSDC_PATCH_BIT1, MSDC_PB1_BUSY_CHECK_SEL);
+ 
++      val = readl(host->base + MSDC_INT);
++      writel(val, host->base + MSDC_INT);
++
+       if (recovery) {
+               sdr_set_field(host->base + MSDC_DMA_CTRL,
+                             MSDC_DMA_CTRL_STOP, 1);
+@@ -2693,11 +2696,14 @@ static int __maybe_unused msdc_suspend(struct device *dev)
+ {
+       struct mmc_host *mmc = dev_get_drvdata(dev);
+       int ret;
++      u32 val;
+ 
+       if (mmc->caps2 & MMC_CAP2_CQE) {
+               ret = cqhci_suspend(mmc);
+               if (ret)
+                       return ret;
++              val = readl(((struct msdc_host *)mmc_priv(mmc))->base + MSDC_INT);
++              writel(val, ((struct msdc_host *)mmc_priv(mmc))->base + MSDC_INT);
+       }
+ 
+       return pm_runtime_force_suspend(dev);
+diff --git a/drivers/pci/pcie/portdrv_core.c b/drivers/pci/pcie/portdrv_core.c
+index 5ae81f2df45f7..3779b264dbec3 100644
+--- a/drivers/pci/pcie/portdrv_core.c
++++ b/drivers/pci/pcie/portdrv_core.c
+@@ -222,8 +222,15 @@ static int get_port_device_capability(struct pci_dev *dev)
+ 
+ #ifdef CONFIG_PCIEAER
+       if (dev->aer_cap && pci_aer_available() &&
+-          (pcie_ports_native || host->native_aer))
++          (pcie_ports_native || host->native_aer)) {
+               services |= PCIE_PORT_SERVICE_AER;
++
++              /*
++               * Disable AER on this port in case it's been enabled by the
++               * BIOS (the AER service driver will enable it when necessary).
++               */
++              pci_disable_pcie_error_reporting(dev);
++      }
+ #endif
+ 
+       /*
+diff --git a/drivers/video/fbdev/pm2fb.c b/drivers/video/fbdev/pm2fb.c
+index 0642555289e06..c12d46e283598 100644
+--- a/drivers/video/fbdev/pm2fb.c
++++ b/drivers/video/fbdev/pm2fb.c
+@@ -616,6 +616,11 @@ static int pm2fb_check_var(struct fb_var_screeninfo *var, struct fb_info *info)
+               return -EINVAL;
+       }
+ 
++      if (!var->pixclock) {
++              DPRINTK("pixclock is zero\n");
++              return -EINVAL;
++      }
++
+       if (PICOS2KHZ(var->pixclock) > PM2_MAX_PIXCLOCK) {
+               DPRINTK("pixclock too high (%ldKHz)\n",
+                       PICOS2KHZ(var->pixclock));
+diff --git a/fs/io_uring.c b/fs/io_uring.c
+index a952288b2ab8e..9654b60a06a58 100644
+--- a/fs/io_uring.c
++++ b/fs/io_uring.c
+@@ -5198,6 +5198,11 @@ static __poll_t __io_arm_poll_handler(struct io_kiocb *req,
+       struct io_ring_ctx *ctx = req->ctx;
+       bool cancel = false;
+ 
++      if (req->file->f_op->may_pollfree) {
++              spin_lock_irq(&ctx->completion_lock);
++              return -EOPNOTSUPP;
++      }
++
+       INIT_HLIST_NODE(&req->hash_node);
+       io_init_poll_iocb(poll, mask, wake_func);
+       poll->file = req->file;
+diff --git a/fs/signalfd.c b/fs/signalfd.c
+index b94fb5f81797a..41dc597b78cc6 100644
+--- a/fs/signalfd.c
++++ b/fs/signalfd.c
+@@ -248,6 +248,7 @@ static const struct file_operations signalfd_fops = {
+       .poll           = signalfd_poll,
+       .read           = signalfd_read,
+       .llseek         = noop_llseek,
++      .may_pollfree   = true,
+ };
+ 
+ static int do_signalfd4(int ufd, sigset_t *mask, int flags)
+diff --git a/fs/xfs/xfs_filestream.c b/fs/xfs/xfs_filestream.c
+index db23e455eb91d..bc41ec0c483d0 100644
+--- a/fs/xfs/xfs_filestream.c
++++ b/fs/xfs/xfs_filestream.c
+@@ -128,11 +128,12 @@ xfs_filestream_pick_ag(
+               if (!pag->pagf_init) {
+                       err = xfs_alloc_pagf_init(mp, NULL, ag, trylock);
+                       if (err) {
+-                              xfs_perag_put(pag);
+-                              if (err != -EAGAIN)
++                              if (err != -EAGAIN) {
++                                      xfs_perag_put(pag);
+                                       return err;
++                              }
+                               /* Couldn't lock the AGF, skip this AG. */
+-                              continue;
++                              goto next_ag;
+                       }
+               }
+ 
+diff --git a/fs/xfs/xfs_fsops.c b/fs/xfs/xfs_fsops.c
+index ef1d5bb88b93a..775f833146e30 100644
+--- a/fs/xfs/xfs_fsops.c
++++ b/fs/xfs/xfs_fsops.c
+@@ -376,46 +376,36 @@ xfs_reserve_blocks(
+        * If the request is larger than the current reservation, reserve the
+        * blocks before we update the reserve counters. Sample m_fdblocks and
+        * perform a partial reservation if the request exceeds free space.
++       *
++       * The code below estimates how many blocks it can request from
++       * fdblocks to stash in the reserve pool.  This is a classic TOCTOU
++       * race since fdblocks updates are not always coordinated via
++       * m_sb_lock.  Set the reserve size even if there's not enough free
++       * space to fill it because mod_fdblocks will refill an undersized
++       * reserve when it can.
+        */
+-      error = -ENOSPC;
+-      do {
+-              free = percpu_counter_sum(&mp->m_fdblocks) -
+-                                              mp->m_alloc_set_aside;
+-              if (free <= 0)
+-                      break;
+-
+-              delta = request - mp->m_resblks;
+-              lcounter = free - delta;
+-              if (lcounter < 0)
+-                      /* We can't satisfy the request, just get what we can */
+-                      fdblks_delta = free;
+-              else
+-                      fdblks_delta = delta;
+-
++      free = percpu_counter_sum(&mp->m_fdblocks) -
++                                              xfs_fdblocks_unavailable(mp);
++      delta = request - mp->m_resblks;
++      mp->m_resblks = request;
++      if (delta > 0 && free > 0) {
+               /*
+                * We'll either succeed in getting space from the free block
+-               * count or we'll get an ENOSPC. If we get a ENOSPC, it means
+-               * things changed while we were calculating fdblks_delta and so
+-               * we should try again to see if there is anything left to
+-               * reserve.
++               * count or we'll get an ENOSPC.  Don't set the reserved flag
++               * here - we don't want to reserve the extra reserve blocks
++               * from the reserve.
+                *
+-               * Don't set the reserved flag here - we don't want to reserve
+-               * the extra reserve blocks from the reserve.....
++               * The desired reserve size can change after we drop the lock.
++               * Use mod_fdblocks to put the space into the reserve or into
++               * fdblocks as appropriate.
+                */
++              fdblks_delta = min(free, delta);
+               spin_unlock(&mp->m_sb_lock);
+               error = xfs_mod_fdblocks(mp, -fdblks_delta, 0);
++              if (!error)
++                      xfs_mod_fdblocks(mp, fdblks_delta, 0);
+               spin_lock(&mp->m_sb_lock);
+-      } while (error == -ENOSPC);
+-
+-      /*
+-       * Update the reserve counters if blocks have been successfully
+-       * allocated.
+-       */
+-      if (!error && fdblks_delta) {
+-              mp->m_resblks += fdblks_delta;
+-              mp->m_resblks_avail += fdblks_delta;
+       }
+-
+ out:
+       if (outval) {
+               outval->resblks = mp->m_resblks;
+diff --git a/fs/xfs/xfs_mount.h b/fs/xfs/xfs_mount.h
+index dfa429b77ee28..3a6bc9dc11b5c 100644
+--- a/fs/xfs/xfs_mount.h
++++ b/fs/xfs/xfs_mount.h
+@@ -406,6 +406,14 @@ extern int        xfs_initialize_perag(xfs_mount_t *mp, xfs_agnumber_t agcount,
+                                    xfs_agnumber_t *maxagi);
+ extern void   xfs_unmountfs(xfs_mount_t *);
+ 
++/* Accessor added for 5.10.y backport */
++static inline uint64_t
++xfs_fdblocks_unavailable(
++      struct xfs_mount        *mp)
++{
++      return mp->m_alloc_set_aside;
++}
++
+ extern int    xfs_mod_fdblocks(struct xfs_mount *mp, int64_t delta,
+                                bool reserved);
+ extern int    xfs_mod_frextents(struct xfs_mount *mp, int64_t delta);
+diff --git a/fs/xfs/xfs_trans_dquot.c b/fs/xfs/xfs_trans_dquot.c
+index fe45b0c3970c1..288ea38c43ad0 100644
+--- a/fs/xfs/xfs_trans_dquot.c
++++ b/fs/xfs/xfs_trans_dquot.c
+@@ -615,7 +615,6 @@ xfs_dqresv_check(
+                       return QUOTA_NL_ISOFTLONGWARN;
+               }
+ 
+-              res->warnings++;
+               return QUOTA_NL_ISOFTWARN;
+       }
+ 
+diff --git a/include/linux/fs.h b/include/linux/fs.h
+index 42d246a942283..c8f887641878f 100644
+--- a/include/linux/fs.h
++++ b/include/linux/fs.h
+@@ -1859,6 +1859,7 @@ struct file_operations {
+                                  struct file *file_out, loff_t pos_out,
+                                  loff_t len, unsigned int remap_flags);
+       int (*fadvise)(struct file *, loff_t, loff_t, int);
++      bool may_pollfree;
+ } __randomize_layout;
+ 
+ struct inode_operations {
+diff --git a/include/linux/rmap.h b/include/linux/rmap.h
+index 8d04e7deedc66..297744ea4dd0c 100644
+--- a/include/linux/rmap.h
++++ b/include/linux/rmap.h
+@@ -39,12 +39,15 @@ struct anon_vma {
+       atomic_t refcount;
+ 
+       /*
+-       * Count of child anon_vmas and VMAs which points to this anon_vma.
++       * Count of child anon_vmas. Equals to the count of all anon_vmas that
++       * have ->parent pointing to this one, including itself.
+        *
+        * This counter is used for making decision about reusing anon_vma
+        * instead of forking new one. See comments in function anon_vma_clone.
+        */
+-      unsigned degree;
++      unsigned long num_children;
++      /* Count of VMAs whose ->anon_vma pointer points to this object. */
++      unsigned long num_active_vmas;
+ 
+       struct anon_vma *parent;        /* Parent of this anon_vma */
+ 
+diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
+index acbf1875ad506..61fc053a4a4ef 100644
+--- a/include/linux/skbuff.h
++++ b/include/linux/skbuff.h
+@@ -2222,6 +2222,14 @@ static inline void skb_set_tail_pointer(struct sk_buff *skb, const int offset)
+ 
+ #endif /* NET_SKBUFF_DATA_USES_OFFSET */
+ 
++static inline void skb_assert_len(struct sk_buff *skb)
++{
++#ifdef CONFIG_DEBUG_NET
++      if (WARN_ONCE(!skb->len, "%s\n", __func__))
++              DO_ONCE_LITE(skb_dump, KERN_ERR, skb, false);
++#endif /* CONFIG_DEBUG_NET */
++}
++
+ /*
+  *    Add data to an sk_buff
+  */
+diff --git a/include/linux/skmsg.h b/include/linux/skmsg.h
+index 822c048934e3f..1138dd3071dbd 100644
+--- a/include/linux/skmsg.h
++++ b/include/linux/skmsg.h
+@@ -281,7 +281,8 @@ static inline void sk_msg_sg_copy_clear(struct sk_msg *msg, u32 start)
+ 
+ static inline struct sk_psock *sk_psock(const struct sock *sk)
+ {
+-      return rcu_dereference_sk_user_data(sk);
++      return __rcu_dereference_sk_user_data_with_flags(sk,
++                                                       SK_USER_DATA_PSOCK);
+ }
+ 
+ static inline void sk_psock_queue_msg(struct sk_psock *psock,
+diff --git a/include/net/sock.h b/include/net/sock.h
+index d31c2b9107e54..d53fb64374767 100644
+--- a/include/net/sock.h
++++ b/include/net/sock.h
+@@ -527,14 +527,26 @@ enum sk_pacing {
+       SK_PACING_FQ            = 2,
+ };
+ 
+-/* Pointer stored in sk_user_data might not be suitable for copying
+- * when cloning the socket. For instance, it can point to a reference
+- * counted object. sk_user_data bottom bit is set if pointer must not
+- * be copied.
++/* flag bits in sk_user_data
++ *
++ * - SK_USER_DATA_NOCOPY:      Pointer stored in sk_user_data might
++ *   not be suitable for copying when cloning the socket. For instance,
++ *   it can point to a reference counted object. sk_user_data bottom
++ *   bit is set if pointer must not be copied.
++ *
++ * - SK_USER_DATA_BPF:         Mark whether sk_user_data field is
++ *   managed/owned by a BPF reuseport array. This bit should be set
++ *   when sk_user_data's sk is added to the bpf's reuseport_array.
++ *
++ * - SK_USER_DATA_PSOCK:       Mark whether pointer stored in
++ *   sk_user_data points to psock type. This bit should be set
++ *   when sk_user_data is assigned to a psock object.
+  */
+ #define SK_USER_DATA_NOCOPY   1UL
+-#define SK_USER_DATA_BPF      2UL     /* Managed by BPF */
+-#define SK_USER_DATA_PTRMASK  ~(SK_USER_DATA_NOCOPY | SK_USER_DATA_BPF)
++#define SK_USER_DATA_BPF      2UL
++#define SK_USER_DATA_PSOCK    4UL
++#define SK_USER_DATA_PTRMASK  ~(SK_USER_DATA_NOCOPY | SK_USER_DATA_BPF |\
++                                SK_USER_DATA_PSOCK)
+ 
+ /**
+  * sk_user_data_is_nocopy - Test if sk_user_data pointer must not be copied
+@@ -547,24 +559,40 @@ static inline bool sk_user_data_is_nocopy(const struct sock *sk)
+ 
+ #define __sk_user_data(sk) ((*((void __rcu **)&(sk)->sk_user_data)))
+ 
++/**
++ * __rcu_dereference_sk_user_data_with_flags - return the pointer
++ * only if argument flags all has been set in sk_user_data. Otherwise
++ * return NULL
++ *
++ * @sk: socket
++ * @flags: flag bits
++ */
++static inline void *
++__rcu_dereference_sk_user_data_with_flags(const struct sock *sk,
++                                        uintptr_t flags)
++{
++      uintptr_t sk_user_data = (uintptr_t)rcu_dereference(__sk_user_data(sk));
++
++      WARN_ON_ONCE(flags & SK_USER_DATA_PTRMASK);
++
++      if ((sk_user_data & flags) == flags)
++              return (void *)(sk_user_data & SK_USER_DATA_PTRMASK);
++      return NULL;
++}
++
+ #define rcu_dereference_sk_user_data(sk)                              \
++      __rcu_dereference_sk_user_data_with_flags(sk, 0)
++#define __rcu_assign_sk_user_data_with_flags(sk, ptr, flags)          \
+ ({                                                                    \
+-      void *__tmp = rcu_dereference(__sk_user_data((sk)));            \
+-      (void *)((uintptr_t)__tmp & SK_USER_DATA_PTRMASK);              \
+-})
+-#define rcu_assign_sk_user_data(sk, ptr)                              \
+-({                                                                    \
+-      uintptr_t __tmp = (uintptr_t)(ptr);                             \
+-      WARN_ON_ONCE(__tmp & ~SK_USER_DATA_PTRMASK);                    \
+-      rcu_assign_pointer(__sk_user_data((sk)), __tmp);                \
+-})
+-#define rcu_assign_sk_user_data_nocopy(sk, ptr)                      \
+-({                                                                    \
+-      uintptr_t __tmp = (uintptr_t)(ptr);                             \
+-      WARN_ON_ONCE(__tmp & ~SK_USER_DATA_PTRMASK);                    \
++      uintptr_t __tmp1 = (uintptr_t)(ptr),                            \
++                __tmp2 = (uintptr_t)(flags);                          \
++      WARN_ON_ONCE(__tmp1 & ~SK_USER_DATA_PTRMASK);                   \
++      WARN_ON_ONCE(__tmp2 & SK_USER_DATA_PTRMASK);                    \
+       rcu_assign_pointer(__sk_user_data((sk)),                        \
+-                         __tmp | SK_USER_DATA_NOCOPY);                \
++                         __tmp1 | __tmp2);                            \
+ })
++#define rcu_assign_sk_user_data(sk, ptr)                              \
++      __rcu_assign_sk_user_data_with_flags(sk, ptr, 0)
+ 
+ /*
+  * SK_CAN_REUSE and SK_NO_REUSE on a socket mean that the socket is OK
+diff --git a/kernel/kprobes.c b/kernel/kprobes.c
+index a397042e46607..a93407da0ae10 100644
+--- a/kernel/kprobes.c
++++ b/kernel/kprobes.c
+@@ -1786,11 +1786,12 @@ static struct kprobe *__disable_kprobe(struct kprobe *p)
+               /* Try to disarm and disable this/parent probe */
+               if (p == orig_p || aggr_kprobe_disabled(orig_p)) {
+                       /*
+-                       * If kprobes_all_disarmed is set, orig_p
+-                       * should have already been disarmed, so
+-                       * skip unneed disarming process.
++                       * Don't be lazy here.  Even if 'kprobes_all_disarmed'
++                       * is false, 'orig_p' might not have been armed yet.
++                       * Note arm_all_kprobes() __tries__ to arm all kprobes
++                       * on the best effort basis.
+                        */
+-                      if (!kprobes_all_disarmed) {
++                      if (!kprobes_all_disarmed && !kprobe_disabled(orig_p)) {
+                               ret = disarm_kprobe(orig_p, true);
+                               if (ret) {
+                                       p->flags &= ~KPROBE_FLAG_DISABLED;
+diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
+index a63713dcd05d5..d868df6f13c86 100644
+--- a/kernel/trace/ftrace.c
++++ b/kernel/trace/ftrace.c
+@@ -2899,6 +2899,16 @@ int ftrace_startup(struct ftrace_ops *ops, int command)
+ 
+       ftrace_startup_enable(command);
+ 
++      /*
++       * If ftrace is in an undefined state, we just remove ops from list
++       * to prevent the NULL pointer, instead of totally rolling it back and
++       * free trampoline, because those actions could cause further damage.
++       */
++      if (unlikely(ftrace_disabled)) {
++              __unregister_ftrace_function(ops);
++              return -ENODEV;
++      }
++
+       ops->flags &= ~FTRACE_OPS_FL_ADDING;
+ 
+       return 0;
+diff --git a/lib/crypto/Kconfig b/lib/crypto/Kconfig
+index 2082af43d51fb..0717a0dcefed1 100644
+--- a/lib/crypto/Kconfig
++++ b/lib/crypto/Kconfig
+@@ -33,7 +33,6 @@ config CRYPTO_ARCH_HAVE_LIB_CHACHA
+ 
+ config CRYPTO_LIB_CHACHA_GENERIC
+       tristate
+-      select XOR_BLOCKS
+       help
+         This symbol can be depended upon by arch implementations of the
+         ChaCha library interface that require the generic code as a
+diff --git a/lib/vdso/gettimeofday.c b/lib/vdso/gettimeofday.c
+index 2919f16981404..c6f6dee087460 100644
+--- a/lib/vdso/gettimeofday.c
++++ b/lib/vdso/gettimeofday.c
+@@ -46,8 +46,8 @@ static inline bool vdso_cycles_ok(u64 cycles)
+ #endif
+ 
+ #ifdef CONFIG_TIME_NS
+-static int do_hres_timens(const struct vdso_data *vdns, clockid_t clk,
+-                        struct __kernel_timespec *ts)
++static __always_inline int do_hres_timens(const struct vdso_data *vdns, clockid_t clk,
++                                        struct __kernel_timespec *ts)
+ {
+       const struct vdso_data *vd = __arch_get_timens_vdso_data();
+       const struct timens_offset *offs = &vdns->offset[clk];
+@@ -97,8 +97,8 @@ static __always_inline const struct vdso_data *__arch_get_timens_vdso_data(void)
+       return NULL;
+ }
+ 
+-static int do_hres_timens(const struct vdso_data *vdns, clockid_t clk,
+-                        struct __kernel_timespec *ts)
++static __always_inline int do_hres_timens(const struct vdso_data *vdns, clockid_t clk,
++                                        struct __kernel_timespec *ts)
+ {
+       return -EINVAL;
+ }
+@@ -159,8 +159,8 @@ static __always_inline int do_hres(const struct vdso_data *vd, clockid_t clk,
+ }
+ 
+ #ifdef CONFIG_TIME_NS
+-static int do_coarse_timens(const struct vdso_data *vdns, clockid_t clk,
+-                          struct __kernel_timespec *ts)
++static __always_inline int do_coarse_timens(const struct vdso_data *vdns, clockid_t clk,
++                                          struct __kernel_timespec *ts)
+ {
+       const struct vdso_data *vd = __arch_get_timens_vdso_data();
+       const struct vdso_timestamp *vdso_ts = &vd->basetime[clk];
+@@ -188,8 +188,8 @@ static int do_coarse_timens(const struct vdso_data *vdns, clockid_t clk,
+       return 0;
+ }
+ #else
+-static int do_coarse_timens(const struct vdso_data *vdns, clockid_t clk,
+-                          struct __kernel_timespec *ts)
++static __always_inline int do_coarse_timens(const struct vdso_data *vdns, clockid_t clk,
++                                          struct __kernel_timespec *ts)
+ {
+       return -1;
+ }
+diff --git a/mm/mmap.c b/mm/mmap.c
+index a1ee93f55cebb..b69c9711bb269 100644
+--- a/mm/mmap.c
++++ b/mm/mmap.c
+@@ -2669,6 +2669,18 @@ static void unmap_region(struct mm_struct *mm,
+       tlb_gather_mmu(&tlb, mm, start, end);
+       update_hiwater_rss(mm);
+       unmap_vmas(&tlb, vma, start, end);
++
++      /*
++       * Ensure we have no stale TLB entries by the time this mapping is
++       * removed from the rmap.
++       * Note that we don't have to worry about nested flushes here because
++       * we're holding the mm semaphore for removing the mapping - so any
++       * concurrent flush in this region has to be coming through the rmap,
++       * and we synchronize against that using the rmap lock.
++       */
++      if ((vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)) != 0)
++              tlb_flush_mmu(&tlb);
++
+       free_pgtables(&tlb, vma, prev ? prev->vm_end : FIRST_USER_ADDRESS,
+                                next ? next->vm_start : USER_PGTABLES_CEILING);
+       tlb_finish_mmu(&tlb, start, end);
+diff --git a/mm/rmap.c b/mm/rmap.c
+index 44ad7bf2e5631..e6f840be18906 100644
+--- a/mm/rmap.c
++++ b/mm/rmap.c
+@@ -89,7 +89,8 @@ static inline struct anon_vma *anon_vma_alloc(void)
+       anon_vma = kmem_cache_alloc(anon_vma_cachep, GFP_KERNEL);
+       if (anon_vma) {
+               atomic_set(&anon_vma->refcount, 1);
+-              anon_vma->degree = 1;   /* Reference for first vma */
++              anon_vma->num_children = 0;
++              anon_vma->num_active_vmas = 0;
+               anon_vma->parent = anon_vma;
+               /*
+                * Initialise the anon_vma root to point to itself. If called
+@@ -197,6 +198,7 @@ int __anon_vma_prepare(struct vm_area_struct *vma)
+               anon_vma = anon_vma_alloc();
+               if (unlikely(!anon_vma))
+                       goto out_enomem_free_avc;
++              anon_vma->num_children++; /* self-parent link for new root */
+               allocated = anon_vma;
+       }
+ 
+@@ -206,8 +208,7 @@ int __anon_vma_prepare(struct vm_area_struct *vma)
+       if (likely(!vma->anon_vma)) {
+               vma->anon_vma = anon_vma;
+               anon_vma_chain_link(vma, avc, anon_vma);
+-              /* vma reference or self-parent link for new root */
+-              anon_vma->degree++;
++              anon_vma->num_active_vmas++;
+               allocated = NULL;
+               avc = NULL;
+       }
+@@ -292,19 +293,19 @@ int anon_vma_clone(struct vm_area_struct *dst, struct vm_area_struct *src)
+               anon_vma_chain_link(dst, avc, anon_vma);
+ 
+               /*
+-               * Reuse existing anon_vma if its degree lower than two,
+-               * that means it has no vma and only one anon_vma child.
++               * Reuse existing anon_vma if it has no vma and only one
++               * anon_vma child.
+                *
+-               * Do not chose parent anon_vma, otherwise first child
+-               * will always reuse it. Root anon_vma is never reused:
++               * Root anon_vma is never reused:
+                * it has self-parent reference and at least one child.
+                */
+               if (!dst->anon_vma && src->anon_vma &&
+-                  anon_vma != src->anon_vma && anon_vma->degree < 2)
++                  anon_vma->num_children < 2 &&
++                  anon_vma->num_active_vmas == 0)
+                       dst->anon_vma = anon_vma;
+       }
+       if (dst->anon_vma)
+-              dst->anon_vma->degree++;
++              dst->anon_vma->num_active_vmas++;
+       unlock_anon_vma_root(root);
+       return 0;
+ 
+@@ -354,6 +355,7 @@ int anon_vma_fork(struct vm_area_struct *vma, struct vm_area_struct *pvma)
+       anon_vma = anon_vma_alloc();
+       if (!anon_vma)
+               goto out_error;
++      anon_vma->num_active_vmas++;
+       avc = anon_vma_chain_alloc(GFP_KERNEL);
+       if (!avc)
+               goto out_error_free_anon_vma;
+@@ -374,7 +376,7 @@ int anon_vma_fork(struct vm_area_struct *vma, struct vm_area_struct *pvma)
+       vma->anon_vma = anon_vma;
+       anon_vma_lock_write(anon_vma);
+       anon_vma_chain_link(vma, avc, anon_vma);
+-      anon_vma->parent->degree++;
++      anon_vma->parent->num_children++;
+       anon_vma_unlock_write(anon_vma);
+ 
+       return 0;
+@@ -406,7 +408,7 @@ void unlink_anon_vmas(struct vm_area_struct *vma)
+                * to free them outside the lock.
+                */
+               if (RB_EMPTY_ROOT(&anon_vma->rb_root.rb_root)) {
+-                      anon_vma->parent->degree--;
++                      anon_vma->parent->num_children--;
+                       continue;
+               }
+ 
+@@ -414,7 +416,7 @@ void unlink_anon_vmas(struct vm_area_struct *vma)
+               anon_vma_chain_free(avc);
+       }
+       if (vma->anon_vma)
+-              vma->anon_vma->degree--;
++              vma->anon_vma->num_active_vmas--;
+       unlock_anon_vma_root(root);
+ 
+       /*
+@@ -425,7 +427,8 @@ void unlink_anon_vmas(struct vm_area_struct *vma)
+       list_for_each_entry_safe(avc, next, &vma->anon_vma_chain, same_vma) {
+               struct anon_vma *anon_vma = avc->anon_vma;
+ 
+-              VM_WARN_ON(anon_vma->degree);
++              VM_WARN_ON(anon_vma->num_children);
++              VM_WARN_ON(anon_vma->num_active_vmas);
+               put_anon_vma(anon_vma);
+ 
+               list_del(&avc->same_vma);
+diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c
+index 88980015ba813..0c38af2ff2097 100644
+--- a/net/bluetooth/l2cap_core.c
++++ b/net/bluetooth/l2cap_core.c
+@@ -1988,11 +1988,11 @@ static struct l2cap_chan *l2cap_global_chan_by_psm(int state, __le16 psm,
+                       src_match = !bacmp(&c->src, src);
+                       dst_match = !bacmp(&c->dst, dst);
+                       if (src_match && dst_match) {
+-                              c = l2cap_chan_hold_unless_zero(c);
+-                              if (c) {
+-                                      read_unlock(&chan_list_lock);
+-                                      return c;
+-                              }
++                              if (!l2cap_chan_hold_unless_zero(c))
++                                      continue;
++
++                              read_unlock(&chan_list_lock);
++                              return c;
+                       }
+ 
+                       /* Closest match */
+diff --git a/net/bpf/test_run.c b/net/bpf/test_run.c
+index f8b231bbbe381..2983e926fe3cc 100644
+--- a/net/bpf/test_run.c
++++ b/net/bpf/test_run.c
+@@ -441,6 +441,9 @@ static int convert___skb_to_skb(struct sk_buff *skb, struct __sk_buff *__skb)
+ {
+       struct qdisc_skb_cb *cb = (struct qdisc_skb_cb *)skb->cb;
+ 
++      if (!skb->len)
++              return -EINVAL;
++
+       if (!__skb)
+               return 0;
+ 
+diff --git a/net/core/dev.c b/net/core/dev.c
+index 8355cc5e11a98..34b5aab42b912 100644
+--- a/net/core/dev.c
++++ b/net/core/dev.c
+@@ -4097,6 +4097,7 @@ static int __dev_queue_xmit(struct sk_buff *skb, struct net_device *sb_dev)
+       bool again = false;
+ 
+       skb_reset_mac_header(skb);
++      skb_assert_len(skb);
+ 
+       if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_SCHED_TSTAMP))
+               __skb_tstamp_tx(skb, NULL, skb->sk, SCM_TSTAMP_SCHED);
+diff --git a/net/core/neighbour.c b/net/core/neighbour.c
+index 52a1c8725337b..434c5aab83ea2 100644
+--- a/net/core/neighbour.c
++++ b/net/core/neighbour.c
+@@ -280,11 +280,26 @@ static int neigh_del_timer(struct neighbour *n)
+       return 0;
+ }
+ 
+-static void pneigh_queue_purge(struct sk_buff_head *list)
++static void pneigh_queue_purge(struct sk_buff_head *list, struct net *net)
+ {
++      struct sk_buff_head tmp;
++      unsigned long flags;
+       struct sk_buff *skb;
+ 
+-      while ((skb = skb_dequeue(list)) != NULL) {
++      skb_queue_head_init(&tmp);
++      spin_lock_irqsave(&list->lock, flags);
++      skb = skb_peek(list);
++      while (skb != NULL) {
++              struct sk_buff *skb_next = skb_peek_next(skb, list);
++              if (net == NULL || net_eq(dev_net(skb->dev), net)) {
++                      __skb_unlink(skb, list);
++                      __skb_queue_tail(&tmp, skb);
++              }
++              skb = skb_next;
++      }
++      spin_unlock_irqrestore(&list->lock, flags);
++
++      while ((skb = __skb_dequeue(&tmp))) {
+               dev_put(skb->dev);
+               kfree_skb(skb);
+       }
+@@ -358,9 +373,9 @@ static int __neigh_ifdown(struct neigh_table *tbl, struct net_device *dev,
+       write_lock_bh(&tbl->lock);
+       neigh_flush_dev(tbl, dev, skip_perm);
+       pneigh_ifdown_and_unlock(tbl, dev);
+-
+-      del_timer_sync(&tbl->proxy_timer);
+-      pneigh_queue_purge(&tbl->proxy_queue);
++      pneigh_queue_purge(&tbl->proxy_queue, dev_net(dev));
++      if (skb_queue_empty_lockless(&tbl->proxy_queue))
++              del_timer_sync(&tbl->proxy_timer);
+       return 0;
+ }
+ 
+@@ -1743,7 +1758,7 @@ int neigh_table_clear(int index, struct neigh_table *tbl)
+       /* It is not clean... Fix it to unload IPv6 module safely */
+       cancel_delayed_work_sync(&tbl->gc_work);
+       del_timer_sync(&tbl->proxy_timer);
+-      pneigh_queue_purge(&tbl->proxy_queue);
++      pneigh_queue_purge(&tbl->proxy_queue, NULL);
+       neigh_ifdown(tbl, NULL);
+       if (atomic_read(&tbl->entries))
+               pr_crit("neighbour leakage\n");
+diff --git a/net/core/skmsg.c b/net/core/skmsg.c
+index 545181a1ae043..bb4fbc60b272e 100644
+--- a/net/core/skmsg.c
++++ b/net/core/skmsg.c
+@@ -612,7 +612,9 @@ struct sk_psock *sk_psock_init(struct sock *sk, int node)
+       sk_psock_set_state(psock, SK_PSOCK_TX_ENABLED);
+       refcount_set(&psock->refcnt, 1);
+ 
+-      rcu_assign_sk_user_data_nocopy(sk, psock);
++      __rcu_assign_sk_user_data_with_flags(sk, psock,
++                                           SK_USER_DATA_NOCOPY |
++                                           SK_USER_DATA_PSOCK);
+       sock_hold(sk);
+ 
+ out:
+diff --git a/net/netfilter/Kconfig b/net/netfilter/Kconfig
+index 6bafd3876aff3..8bf70ce03f951 100644
+--- a/net/netfilter/Kconfig
++++ b/net/netfilter/Kconfig
+@@ -118,7 +118,6 @@ config NF_CONNTRACK_ZONES
+ 
+ config NF_CONNTRACK_PROCFS
+       bool "Supply CT list in procfs (OBSOLETE)"
+-      default y
+       depends on PROC_FS
+       help
+       This option enables for the list of known conntrack entries
+diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
+index 5ee600d108a0a..b70b06e312bd0 100644
+--- a/net/packet/af_packet.c
++++ b/net/packet/af_packet.c
+@@ -2986,8 +2986,8 @@ static int packet_snd(struct socket *sock, struct msghdr *msg, size_t len)
+       if (err)
+               goto out_free;
+ 
+-      if (sock->type == SOCK_RAW &&
+-          !dev_validate_header(dev, skb->data, len)) {
++      if ((sock->type == SOCK_RAW &&
++           !dev_validate_header(dev, skb->data, len)) || !skb->len) {
+               err = -EINVAL;
+               goto out_free;
+       }
+diff --git a/scripts/Makefile.modpost b/scripts/Makefile.modpost
+index 12a87be0fb446..42154b6df6529 100644
+--- a/scripts/Makefile.modpost
++++ b/scripts/Makefile.modpost
+@@ -87,8 +87,7 @@ obj := $(KBUILD_EXTMOD)
+ src := $(obj)
+ 
+ # Include the module's Makefile to find KBUILD_EXTRA_SYMBOLS
+-include $(if $(wildcard $(KBUILD_EXTMOD)/Kbuild), \
+-             $(KBUILD_EXTMOD)/Kbuild, $(KBUILD_EXTMOD)/Makefile)
++include $(if $(wildcard $(src)/Kbuild), $(src)/Kbuild, $(src)/Makefile)
+ 
+ # modpost option for external modules
+ MODPOST += -e
