commit:     70cbd5e01cdccbccf1fc76fea16db1e89bfac93e
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Wed Jul  5 20:29:33 2023 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Wed Jul  5 20:29:33 2023 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=70cbd5e0

Linux patch 5.15.120

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

 0000_README               |   4 +
 1119_linux-5.15.120.patch | 701 ++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 705 insertions(+)

diff --git a/0000_README b/0000_README
index 8952a110..7167743f 100644
--- a/0000_README
+++ b/0000_README
@@ -519,6 +519,10 @@ Patch:  1118_linux-5.15.119.patch
 From:   https://www.kernel.org
 Desc:   Linux 5.15.119
 
+Patch:  1119_linux-5.15.120.patch
+From:   https://www.kernel.org
+Desc:   Linux 5.15.120
+
 Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.

diff --git a/1119_linux-5.15.120.patch b/1119_linux-5.15.120.patch
new file mode 100644
index 00000000..46026de4
--- /dev/null
+++ b/1119_linux-5.15.120.patch
@@ -0,0 +1,701 @@
+diff --git a/Makefile b/Makefile
+index b863208e31b27..017e483854657 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 5
+ PATCHLEVEL = 15
+-SUBLEVEL = 119
++SUBLEVEL = 120
+ EXTRAVERSION =
+ NAME = Trick or Treat
+ 
+diff --git a/arch/parisc/include/asm/assembly.h b/arch/parisc/include/asm/assembly.h
+index a39250cb7dfcf..fd8c1ebd27470 100644
+--- a/arch/parisc/include/asm/assembly.h
++++ b/arch/parisc/include/asm/assembly.h
+@@ -72,10 +72,6 @@
+ 
+ #include <asm/asmregs.h>
+ 
+-      sp      =       30
+-      gp      =       27
+-      ipsw    =       22
+-
+       /*
+        * We provide two versions of each macro to convert from physical
+        * to virtual and vice versa. The "_r1" versions take one argument
+diff --git a/arch/x86/kernel/cpu/microcode/amd.c b/arch/x86/kernel/cpu/microcode/amd.c
+index 9bffe40e97d34..6a95a52d08daa 100644
+--- a/arch/x86/kernel/cpu/microcode/amd.c
++++ b/arch/x86/kernel/cpu/microcode/amd.c
+@@ -699,7 +699,7 @@ static enum ucode_state apply_microcode_amd(int cpu)
+       rdmsr(MSR_AMD64_PATCH_LEVEL, rev, dummy);
+ 
+       /* need to apply patch? */
+-      if (rev >= mc_amd->hdr.patch_id) {
++      if (rev > mc_amd->hdr.patch_id) {
+               ret = UCODE_OK;
+               goto out;
+       }
+diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
+index 85f6e242b6b45..714f66aa03388 100644
+--- a/arch/x86/kernel/smpboot.c
++++ b/arch/x86/kernel/smpboot.c
+@@ -105,6 +105,17 @@ DEFINE_PER_CPU_READ_MOSTLY(cpumask_var_t, cpu_llc_shared_map);
+ DEFINE_PER_CPU_READ_MOSTLY(struct cpuinfo_x86, cpu_info);
+ EXPORT_PER_CPU_SYMBOL(cpu_info);
+ 
++struct mwait_cpu_dead {
++      unsigned int    control;
++      unsigned int    status;
++};
++
++/*
++ * Cache line aligned data for mwait_play_dead(). Separate on purpose so
++ * that it's unlikely to be touched by other CPUs.
++ */
++static DEFINE_PER_CPU_ALIGNED(struct mwait_cpu_dead, mwait_cpu_dead);
++
+ /* Logical package management. We might want to allocate that dynamically */
+ unsigned int __max_logical_packages __read_mostly;
+ EXPORT_SYMBOL(__max_logical_packages);
+@@ -1685,10 +1696,10 @@ EXPORT_SYMBOL_GPL(cond_wakeup_cpu0);
+  */
+ static inline void mwait_play_dead(void)
+ {
++      struct mwait_cpu_dead *md = this_cpu_ptr(&mwait_cpu_dead);
+       unsigned int eax, ebx, ecx, edx;
+       unsigned int highest_cstate = 0;
+       unsigned int highest_subcstate = 0;
+-      void *mwait_ptr;
+       int i;
+ 
+       if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD ||
+@@ -1723,13 +1734,6 @@ static inline void mwait_play_dead(void)
+                       (highest_subcstate - 1);
+       }
+ 
+-      /*
+-       * This should be a memory location in a cache line which is
+-       * unlikely to be touched by other processors.  The actual
+-       * content is immaterial as it is not actually modified in any way.
+-       */
+-      mwait_ptr = &current_thread_info()->flags;
+-
+       wbinvd();
+ 
+       while (1) {
+@@ -1741,9 +1745,9 @@ static inline void mwait_play_dead(void)
+                * case where we return around the loop.
+                */
+               mb();
+-              clflush(mwait_ptr);
++              clflush(md);
+               mb();
+-              __monitor(mwait_ptr, 0, 0);
++              __monitor(md, 0, 0);
+               mb();
+               __mwait(eax, 0);
+ 
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+index d03a4519f945b..8a0b652da4f4b 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+@@ -685,7 +685,6 @@ int amdgpu_bo_create_vm(struct amdgpu_device *adev,
+        * num of amdgpu_vm_pt entries.
+        */
+       BUG_ON(bp->bo_ptr_size < sizeof(struct amdgpu_bo_vm));
+-      bp->destroy = &amdgpu_bo_vm_destroy;
+       r = amdgpu_bo_create(adev, bp, &bo_ptr);
+       if (r)
+               return r;
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+index 924c6d5f86203..8da5cc8fcc6c0 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+@@ -3274,6 +3274,10 @@ int amdgpu_vm_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
+       long timeout = msecs_to_jiffies(2000);
+       int r;
+ 
++      /* No valid flags defined yet */
++      if (args->in.flags)
++              return -EINVAL;
++
+       switch (args->in.op) {
+       case AMDGPU_VM_OP_RESERVE_VMID:
+               /* We only have requirement to reserve vmid from gfxhub */
+diff --git a/drivers/hid/hid-logitech-hidpp.c b/drivers/hid/hid-logitech-hidpp.c
+index baa68ae9b9efc..c61da859cd3c6 100644
+--- a/drivers/hid/hid-logitech-hidpp.c
++++ b/drivers/hid/hid-logitech-hidpp.c
+@@ -4295,7 +4295,7 @@ static const struct hid_device_id hidpp_devices[] = {
+       { /* wireless touchpad T651 */
+         HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_LOGITECH,
+               USB_DEVICE_ID_LOGITECH_T651),
+-        .driver_data = HIDPP_QUIRK_CLASS_WTP },
++        .driver_data = HIDPP_QUIRK_CLASS_WTP | HIDPP_QUIRK_DELAYED_INIT },
+       { /* Mouse Logitech Anywhere MX */
+         LDJ_DEVICE(0x1017), .driver_data = HIDPP_QUIRK_HI_RES_SCROLL_1P0 },
+       { /* Mouse Logitech Cube */
+diff --git a/drivers/hid/wacom_wac.c b/drivers/hid/wacom_wac.c
+index 5fd766ad6e45f..02ca80f68d178 100644
+--- a/drivers/hid/wacom_wac.c
++++ b/drivers/hid/wacom_wac.c
+@@ -1314,7 +1314,7 @@ static void wacom_intuos_pro2_bt_pen(struct wacom_wac *wacom)
+       struct input_dev *pen_input = wacom->pen_input;
+       unsigned char *data = wacom->data;
+       int number_of_valid_frames = 0;
+-      int time_interval = 15000000;
++      ktime_t time_interval = 15000000;
+       ktime_t time_packet_received = ktime_get();
+       int i;
+ 
+@@ -1348,7 +1348,7 @@ static void wacom_intuos_pro2_bt_pen(struct wacom_wac *wacom)
+       if (number_of_valid_frames) {
+               if (wacom->hid_data.time_delayed)
+                       time_interval = ktime_get() - wacom->hid_data.time_delayed;
+-              time_interval /= number_of_valid_frames;
++              time_interval = div_u64(time_interval, number_of_valid_frames);
+               wacom->hid_data.time_delayed = time_packet_received;
+       }
+ 
+@@ -1359,7 +1359,7 @@ static void wacom_intuos_pro2_bt_pen(struct wacom_wac *wacom)
+               bool range = frame[0] & 0x20;
+               bool invert = frame[0] & 0x10;
+               int frames_number_reversed = number_of_valid_frames - i - 1;
+-              int event_timestamp = time_packet_received - frames_number_reversed * time_interval;
++              ktime_t event_timestamp = time_packet_received - frames_number_reversed * time_interval;
+ 
+               if (!valid)
+                       continue;
+diff --git a/drivers/hid/wacom_wac.h b/drivers/hid/wacom_wac.h
+index dbf23e71aa72a..db06084c4ed5d 100644
+--- a/drivers/hid/wacom_wac.h
++++ b/drivers/hid/wacom_wac.h
+@@ -321,7 +321,7 @@ struct hid_data {
+       int bat_connected;
+       int ps_connected;
+       bool pad_input_event_flag;
+-      int time_delayed;
++      ktime_t time_delayed;
+ };
+ 
+ struct wacom_remote_data {
+diff --git a/drivers/nubus/proc.c b/drivers/nubus/proc.c
+index 88e1f9a0faafd..78cf0e7b53d5b 100644
+--- a/drivers/nubus/proc.c
++++ b/drivers/nubus/proc.c
+@@ -137,6 +137,18 @@ static int nubus_proc_rsrc_show(struct seq_file *m, void *v)
+       return 0;
+ }
+ 
++static int nubus_rsrc_proc_open(struct inode *inode, struct file *file)
++{
++      return single_open(file, nubus_proc_rsrc_show, inode);
++}
++
++static const struct proc_ops nubus_rsrc_proc_ops = {
++      .proc_open      = nubus_rsrc_proc_open,
++      .proc_read      = seq_read,
++      .proc_lseek     = seq_lseek,
++      .proc_release   = single_release,
++};
++
+ void nubus_proc_add_rsrc_mem(struct proc_dir_entry *procdir,
+                            const struct nubus_dirent *ent,
+                            unsigned int size)
+@@ -152,8 +164,8 @@ void nubus_proc_add_rsrc_mem(struct proc_dir_entry *procdir,
+               pde_data = nubus_proc_alloc_pde_data(nubus_dirptr(ent), size);
+       else
+               pde_data = NULL;
+-      proc_create_single_data(name, S_IFREG | 0444, procdir,
+-                      nubus_proc_rsrc_show, pde_data);
++      proc_create_data(name, S_IFREG | 0444, procdir,
++                       &nubus_rsrc_proc_ops, pde_data);
+ }
+ 
+ void nubus_proc_add_rsrc(struct proc_dir_entry *procdir,
+@@ -166,9 +178,9 @@ void nubus_proc_add_rsrc(struct proc_dir_entry *procdir,
+               return;
+ 
+       snprintf(name, sizeof(name), "%x", ent->type);
+-      proc_create_single_data(name, S_IFREG | 0444, procdir,
+-                      nubus_proc_rsrc_show,
+-                      nubus_proc_alloc_pde_data(data, 0));
++      proc_create_data(name, S_IFREG | 0444, procdir,
++                       &nubus_rsrc_proc_ops,
++                       nubus_proc_alloc_pde_data(data, 0));
+ }
+ 
+ /*
+diff --git a/drivers/thermal/mtk_thermal.c b/drivers/thermal/mtk_thermal.c
+index 9c857fb5d9681..ede94eaddddae 100644
+--- a/drivers/thermal/mtk_thermal.c
++++ b/drivers/thermal/mtk_thermal.c
+@@ -1028,12 +1028,7 @@ static int mtk_thermal_probe(struct platform_device *pdev)
+               return -ENODEV;
+       }
+ 
+-      auxadc_base = devm_of_iomap(&pdev->dev, auxadc, 0, NULL);
+-      if (IS_ERR(auxadc_base)) {
+-              of_node_put(auxadc);
+-              return PTR_ERR(auxadc_base);
+-      }
+-
++      auxadc_base = of_iomap(auxadc, 0);
+       auxadc_phys_base = of_get_phys_base(auxadc);
+ 
+       of_node_put(auxadc);
+@@ -1049,12 +1044,7 @@ static int mtk_thermal_probe(struct platform_device *pdev)
+               return -ENODEV;
+       }
+ 
+-      apmixed_base = devm_of_iomap(&pdev->dev, apmixedsys, 0, NULL);
+-      if (IS_ERR(apmixed_base)) {
+-              of_node_put(apmixedsys);
+-              return PTR_ERR(apmixed_base);
+-      }
+-
++      apmixed_base = of_iomap(apmixedsys, 0);
+       apmixed_phys_base = of_get_phys_base(apmixedsys);
+ 
+       of_node_put(apmixedsys);
+diff --git a/include/linux/highmem.h b/include/linux/highmem.h
+index b4c49f9cc379e..87763f48c6c3d 100644
+--- a/include/linux/highmem.h
++++ b/include/linux/highmem.h
+@@ -247,6 +247,30 @@ static inline void copy_user_highpage(struct page *to, struct page *from,
+ 
+ #endif
+ 
++#ifdef copy_mc_to_kernel
++static inline int copy_mc_user_highpage(struct page *to, struct page *from,
++                                      unsigned long vaddr, struct vm_area_struct *vma)
++{
++      unsigned long ret;
++      char *vfrom, *vto;
++
++      vfrom = kmap_local_page(from);
++      vto = kmap_local_page(to);
++      ret = copy_mc_to_kernel(vto, vfrom, PAGE_SIZE);
++      kunmap_local(vto);
++      kunmap_local(vfrom);
++
++      return ret;
++}
++#else
++static inline int copy_mc_user_highpage(struct page *to, struct page *from,
++                                      unsigned long vaddr, struct vm_area_struct *vma)
++{
++      copy_user_highpage(to, from, vaddr, vma);
++      return 0;
++}
++#endif
++
+ #ifndef __HAVE_ARCH_COPY_HIGHPAGE
+ 
+ static inline void copy_highpage(struct page *to, struct page *from)
+diff --git a/include/linux/mm.h b/include/linux/mm.h
+index e4e1817bb3b89..a27a6b58d3740 100644
+--- a/include/linux/mm.h
++++ b/include/linux/mm.h
+@@ -3124,7 +3124,6 @@ enum mf_flags {
+       MF_SOFT_OFFLINE = 1 << 3,
+ };
+ extern int memory_failure(unsigned long pfn, int flags);
+-extern void memory_failure_queue(unsigned long pfn, int flags);
+ extern void memory_failure_queue_kick(int cpu);
+ extern int unpoison_memory(unsigned long pfn);
+ extern int sysctl_memory_failure_early_kill;
+@@ -3133,8 +3132,12 @@ extern void shake_page(struct page *p);
+ extern atomic_long_t num_poisoned_pages __read_mostly;
+ extern int soft_offline_page(unsigned long pfn, int flags);
+ #ifdef CONFIG_MEMORY_FAILURE
++extern void memory_failure_queue(unsigned long pfn, int flags);
+ extern int __get_huge_page_for_hwpoison(unsigned long pfn, int flags);
+ #else
++static inline void memory_failure_queue(unsigned long pfn, int flags)
++{
++}
+ static inline int __get_huge_page_for_hwpoison(unsigned long pfn, int flags)
+ {
+       return 0;
+diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
+index 4f2271f27a1db..7a70595c3c15a 100644
+--- a/kernel/bpf/verifier.c
++++ b/kernel/bpf/verifier.c
+@@ -12588,9 +12588,10 @@ static int jit_subprogs(struct bpf_verifier_env *env)
+       }
+ 
+       /* finally lock prog and jit images for all functions and
+-       * populate kallsysm
++       * populate kallsysm. Begin at the first subprogram, since
++       * bpf_prog_load will add the kallsyms for the main program.
+        */
+-      for (i = 0; i < env->subprog_cnt; i++) {
++      for (i = 1; i < env->subprog_cnt; i++) {
+               bpf_prog_lock_ro(func[i]);
+               bpf_prog_kallsyms_add(func[i]);
+       }
+@@ -12615,6 +12616,8 @@ static int jit_subprogs(struct bpf_verifier_env *env)
+ 
+       prog->jited = 1;
+       prog->bpf_func = func[0]->bpf_func;
++      prog->aux->extable = func[0]->aux->extable;
++      prog->aux->num_exentries = func[0]->aux->num_exentries;
+       prog->aux->func = func;
+       prog->aux->func_cnt = env->subprog_cnt;
+       bpf_prog_jit_attempt_done(prog);
+diff --git a/mm/memory.c b/mm/memory.c
+index 8d71a82462dd5..1bb01b12db532 100644
+--- a/mm/memory.c
++++ b/mm/memory.c
+@@ -2753,10 +2753,16 @@ static inline int pte_unmap_same(struct mm_struct *mm, pmd_t *pmd,
+       return same;
+ }
+ 
+-static inline bool cow_user_page(struct page *dst, struct page *src,
+-                               struct vm_fault *vmf)
++/*
++ * Return:
++ *    0:              copied succeeded
++ *    -EHWPOISON:     copy failed due to hwpoison in source page
++ *    -EAGAIN:        copied failed (some other reason)
++ */
++static inline int cow_user_page(struct page *dst, struct page *src,
++                                    struct vm_fault *vmf)
+ {
+-      bool ret;
++      int ret;
+       void *kaddr;
+       void __user *uaddr;
+       bool locked = false;
+@@ -2765,8 +2771,11 @@ static inline bool cow_user_page(struct page *dst, struct page *src,
+       unsigned long addr = vmf->address;
+ 
+       if (likely(src)) {
+-              copy_user_highpage(dst, src, addr, vma);
+-              return true;
++              if (copy_mc_user_highpage(dst, src, addr, vma)) {
++                      memory_failure_queue(page_to_pfn(src), 0);
++                      return -EHWPOISON;
++              }
++              return 0;
+       }
+ 
+       /*
+@@ -2793,7 +2802,7 @@ static inline bool cow_user_page(struct page *dst, struct page *src,
+                        * and update local tlb only
+                        */
+                       update_mmu_tlb(vma, addr, vmf->pte);
+-                      ret = false;
++                      ret = -EAGAIN;
+                       goto pte_unlock;
+               }
+ 
+@@ -2818,7 +2827,7 @@ static inline bool cow_user_page(struct page *dst, struct page *src,
+               if (!likely(pte_same(*vmf->pte, vmf->orig_pte))) {
+                       /* The PTE changed under us, update local tlb */
+                       update_mmu_tlb(vma, addr, vmf->pte);
+-                      ret = false;
++                      ret = -EAGAIN;
+                       goto pte_unlock;
+               }
+ 
+@@ -2837,7 +2846,7 @@ warn:
+               }
+       }
+ 
+-      ret = true;
++      ret = 0;
+ 
+ pte_unlock:
+       if (locked)
+@@ -3003,6 +3012,7 @@ static vm_fault_t wp_page_copy(struct vm_fault *vmf)
+       pte_t entry;
+       int page_copied = 0;
+       struct mmu_notifier_range range;
++      int ret;
+ 
+       if (unlikely(anon_vma_prepare(vma)))
+               goto oom;
+@@ -3018,17 +3028,20 @@ static vm_fault_t wp_page_copy(struct vm_fault *vmf)
+               if (!new_page)
+                       goto oom;
+ 
+-              if (!cow_user_page(new_page, old_page, vmf)) {
++              ret = cow_user_page(new_page, old_page, vmf);
++              if (ret) {
+                       /*
+                        * COW failed, if the fault was solved by other,
+                        * it's fine. If not, userspace would re-fault on
+                        * the same address and we will handle the fault
+                        * from the second attempt.
++                       * The -EHWPOISON case will not be retried.
+                        */
+                       put_page(new_page);
+                       if (old_page)
+                               put_page(old_page);
+-                      return 0;
++
++                      return ret == -EHWPOISON ? VM_FAULT_HWPOISON : 0;
+               }
+       }
+ 
+diff --git a/net/can/isotp.c b/net/can/isotp.c
+index ca69ca39c8fd8..4dccf7b4b88d1 100644
+--- a/net/can/isotp.c
++++ b/net/can/isotp.c
+@@ -992,8 +992,9 @@ static int isotp_sendmsg(struct socket *sock, struct msghdr *msg, size_t size)
+               /* wait for complete transmission of current pdu */
+               wait_event_interruptible(so->wait, so->tx.state == ISOTP_IDLE);
+ 
+-              if (sk->sk_err)
+-                      return -sk->sk_err;
++              err = sock_error(sk);
++              if (err)
++                      return err;
+       }
+ 
+       return size;
+diff --git a/net/mptcp/protocol.c b/net/mptcp/protocol.c
+index b6a38af72e1bc..82b1583f709d3 100644
+--- a/net/mptcp/protocol.c
++++ b/net/mptcp/protocol.c
+@@ -51,7 +51,7 @@ enum {
+ static struct percpu_counter mptcp_sockets_allocated;
+ 
+ static void __mptcp_destroy_sock(struct sock *sk);
+-static void __mptcp_check_send_data_fin(struct sock *sk);
++static void mptcp_check_send_data_fin(struct sock *sk);
+ 
+ DEFINE_PER_CPU(struct mptcp_delegated_action, mptcp_delegated_actions);
+ static struct net_device mptcp_napi_dev;
+@@ -355,8 +355,7 @@ static bool mptcp_pending_data_fin_ack(struct sock *sk)
+ {
+       struct mptcp_sock *msk = mptcp_sk(sk);
+ 
+-      return !__mptcp_check_fallback(msk) &&
+-             ((1 << sk->sk_state) &
++      return ((1 << sk->sk_state) &
+               (TCPF_FIN_WAIT1 | TCPF_CLOSING | TCPF_LAST_ACK)) &&
+              msk->write_seq == READ_ONCE(msk->snd_una);
+ }
+@@ -509,9 +508,6 @@ static bool mptcp_check_data_fin(struct sock *sk)
+       u64 rcv_data_fin_seq;
+       bool ret = false;
+ 
+-      if (__mptcp_check_fallback(msk))
+-              return ret;
+-
+       /* Need to ack a DATA_FIN received from a peer while this side
+        * of the connection is in ESTABLISHED, FIN_WAIT1, or FIN_WAIT2.
+        * msk->rcv_data_fin was set when parsing the incoming options
+@@ -549,7 +545,8 @@ static bool mptcp_check_data_fin(struct sock *sk)
+               }
+ 
+               ret = true;
+-              mptcp_send_ack(msk);
++              if (!__mptcp_check_fallback(msk))
++                      mptcp_send_ack(msk);
+               mptcp_close_wake_up(sk);
+       }
+       return ret;
+@@ -1612,7 +1609,7 @@ out:
+       if (!mptcp_timer_pending(sk))
+               mptcp_reset_timer(sk);
+       if (copied)
+-              __mptcp_check_send_data_fin(sk);
++              mptcp_check_send_data_fin(sk);
+ }
+ 
+ static void __mptcp_subflow_push_pending(struct sock *sk, struct sock *ssk)
+@@ -2451,7 +2448,6 @@ static void mptcp_worker(struct work_struct *work)
+       if (unlikely((1 << state) & (TCPF_CLOSE | TCPF_LISTEN)))
+               goto unlock;
+ 
+-      mptcp_check_data_fin_ack(sk);
+       mptcp_flush_join_list(msk);
+ 
+       mptcp_check_fastclose(msk);
+@@ -2462,7 +2458,8 @@ static void mptcp_worker(struct work_struct *work)
+       if (test_and_clear_bit(MPTCP_WORK_EOF, &msk->flags))
+               mptcp_check_for_eof(msk);
+ 
+-      __mptcp_check_send_data_fin(sk);
++      mptcp_check_send_data_fin(sk);
++      mptcp_check_data_fin_ack(sk);
+       mptcp_check_data_fin(sk);
+ 
+       /* There is no point in keeping around an orphaned sk timedout or
+@@ -2591,6 +2588,12 @@ void mptcp_subflow_shutdown(struct sock *sk, struct sock *ssk, int how)
+                       pr_debug("Fallback");
+                       ssk->sk_shutdown |= how;
+                       tcp_shutdown(ssk, how);
++
++                      /* simulate the data_fin ack reception to let the state
++                       * machine move forward
++                       */
++                      WRITE_ONCE(mptcp_sk(sk)->snd_una, mptcp_sk(sk)->snd_nxt);
++                      mptcp_schedule_work(sk);
+               } else {
+                       pr_debug("Sending DATA_FIN on subflow %p", ssk);
+                       tcp_send_ack(ssk);
+@@ -2630,7 +2633,7 @@ static int mptcp_close_state(struct sock *sk)
+       return next & TCP_ACTION_FIN;
+ }
+ 
+-static void __mptcp_check_send_data_fin(struct sock *sk)
++static void mptcp_check_send_data_fin(struct sock *sk)
+ {
+       struct mptcp_subflow_context *subflow;
+       struct mptcp_sock *msk = mptcp_sk(sk);
+@@ -2648,18 +2651,6 @@ static void __mptcp_check_send_data_fin(struct sock *sk)
+ 
+       WRITE_ONCE(msk->snd_nxt, msk->write_seq);
+ 
+-      /* fallback socket will not get data_fin/ack, can move to the next
+-       * state now
+-       */
+-      if (__mptcp_check_fallback(msk)) {
+-              if ((1 << sk->sk_state) & (TCPF_CLOSING | TCPF_LAST_ACK)) {
+-                      inet_sk_state_store(sk, TCP_CLOSE);
+-                      mptcp_close_wake_up(sk);
+-              } else if (sk->sk_state == TCP_FIN_WAIT1) {
+-                      inet_sk_state_store(sk, TCP_FIN_WAIT2);
+-              }
+-      }
+-
+       mptcp_flush_join_list(msk);
+       mptcp_for_each_subflow(msk, subflow) {
+               struct sock *tcp_sk = mptcp_subflow_tcp_sock(subflow);
+@@ -2680,7 +2671,7 @@ static void __mptcp_wr_shutdown(struct sock *sk)
+       WRITE_ONCE(msk->write_seq, msk->write_seq + 1);
+       WRITE_ONCE(msk->snd_data_fin_enable, 1);
+ 
+-      __mptcp_check_send_data_fin(sk);
++      mptcp_check_send_data_fin(sk);
+ }
+ 
+ static void __mptcp_destroy_sock(struct sock *sk)
+@@ -2807,6 +2798,12 @@ static int mptcp_disconnect(struct sock *sk, int flags)
+       struct mptcp_subflow_context *subflow;
+       struct mptcp_sock *msk = mptcp_sk(sk);
+ 
++      /* Deny disconnect if other threads are blocked in sk_wait_event()
++       * or inet_wait_for_connect().
++       */
++      if (sk->sk_wait_pending)
++              return -EBUSY;
++
+       mptcp_do_flush_join_list(msk);
+ 
+       mptcp_for_each_subflow(msk, subflow) {
+@@ -2845,6 +2842,7 @@ struct sock *mptcp_sk_clone(const struct sock *sk,
+               inet_sk(nsk)->pinet6 = mptcp_inet6_sk(nsk);
+ #endif
+ 
++      nsk->sk_wait_pending = 0;
+       __mptcp_init_sock(nsk);
+ 
+       msk = mptcp_sk(nsk);
+diff --git a/net/mptcp/subflow.c b/net/mptcp/subflow.c
+index 9b89999062c97..666f6720db765 100644
+--- a/net/mptcp/subflow.c
++++ b/net/mptcp/subflow.c
+@@ -1653,14 +1653,16 @@ static void subflow_state_change(struct sock *sk)
+ {
+       struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk);
+       struct sock *parent = subflow->conn;
++      struct mptcp_sock *msk;
+ 
+       __subflow_state_change(sk);
+ 
++      msk = mptcp_sk(parent);
+       if (subflow_simultaneous_connect(sk)) {
+               mptcp_propagate_sndbuf(parent, sk);
+               mptcp_do_fallback(sk);
+-              mptcp_rcv_space_init(mptcp_sk(parent), sk);
+-              pr_fallback(mptcp_sk(parent));
++              mptcp_rcv_space_init(msk, sk);
++              pr_fallback(msk);
+               subflow->conn_finished = 1;
+               mptcp_set_connected(parent);
+       }
+@@ -1676,11 +1678,12 @@ static void subflow_state_change(struct sock *sk)
+ 
+       subflow_sched_work_if_closed(mptcp_sk(parent), sk);
+ 
+-      if (__mptcp_check_fallback(mptcp_sk(parent)) &&
+-          !subflow->rx_eof && subflow_is_done(sk)) {
+-              subflow->rx_eof = 1;
+-              mptcp_subflow_eof(parent);
+-      }
++      /* when the fallback subflow closes the rx side, trigger a 'dummy'
++       * ingress data fin, so that the msk state will follow along
++       */
++      if (__mptcp_check_fallback(msk) && subflow_is_done(sk) && msk->first == sk &&
++          mptcp_update_rcv_data_fin(msk, READ_ONCE(msk->ack_seq), true))
++              mptcp_schedule_work(parent);
+ }
+ 
+ static int subflow_ulp_init(struct sock *sk)
+diff --git a/scripts/tags.sh b/scripts/tags.sh
+index 91413d45f0fa1..d40cae4e7ea28 100755
+--- a/scripts/tags.sh
++++ b/scripts/tags.sh
+@@ -32,6 +32,13 @@ else
+       ignore="$ignore ( -path ${tree}tools ) -prune -o"
+ fi
+ 
++# gtags(1) refuses to index any file outside of its current working dir.
++# If gtags indexing is requested and the build output directory is not
++# the kernel source tree, index all files in absolute-path form.
++if [[ "$1" == "gtags" && -n "${tree}" ]]; then
++      tree=$(realpath "$tree")/
++fi
++
+ # Detect if ALLSOURCE_ARCHS is set. If not, we assume SRCARCH
+ if [ "${ALLSOURCE_ARCHS}" = "" ]; then
+       ALLSOURCE_ARCHS=${SRCARCH}
+@@ -131,7 +138,7 @@ docscope()
+ 
+ dogtags()
+ {
+-      all_target_sources | gtags -i -f -
++      all_target_sources | gtags -i -C "${tree:-.}" -f - "$PWD"
+ }
+ 
+ # Basic regular expressions with an optional /kind-spec/ for ctags and
+diff --git a/tools/perf/util/symbol.c b/tools/perf/util/symbol.c
+index b1e5fd99e38af..80c54196e0e4f 100644
+--- a/tools/perf/util/symbol.c
++++ b/tools/perf/util/symbol.c
+@@ -1357,10 +1357,23 @@ static int dso__load_kcore(struct dso *dso, struct map *map,
+ 
+       /* Find the kernel map using the '_stext' symbol */
+       if (!kallsyms__get_function_start(kallsyms_filename, "_stext", &stext)) {
++              u64 replacement_size = 0;
++
+               list_for_each_entry(new_map, &md.maps, node) {
+-                      if (stext >= new_map->start && stext < new_map->end) {
++                      u64 new_size = new_map->end - new_map->start;
++
++                      if (!(stext >= new_map->start && stext < new_map->end))
++                              continue;
++
++                      /*
++                       * On some architectures, ARM64 for example, the kernel
++                       * text can get allocated inside of the vmalloc segment.
++                       * Select the smallest matching segment, in case stext
++                       * falls within more than one in the list.
++                       */
++                      if (!replacement_map || new_size < replacement_size) {
+                               replacement_map = new_map;
+-                              break;
++                              replacement_size = new_size;
+                       }
+               }
+       }
