commit:     475944ee2d977ef08f3ed7e0f7e5c7ccb6a39cd7
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Wed Feb  3 23:23:26 2021 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Wed Feb  3 23:23:26 2021 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=475944ee

Linux patch 4.4.255

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

 0000_README              |    4 +
 1254_linux-4.4.255.patch | 1473 ++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 1477 insertions(+)

diff --git a/0000_README b/0000_README
index 1150ccd..849b173 100644
--- a/0000_README
+++ b/0000_README
@@ -1059,6 +1059,10 @@ Patch:  1253_linux-4.4.254.patch
 From:   http://www.kernel.org
 Desc:   Linux 4.4.254
 
+Patch:  1254_linux-4.4.255.patch
+From:   http://www.kernel.org
+Desc:   Linux 4.4.255
+
 Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.

diff --git a/1254_linux-4.4.255.patch b/1254_linux-4.4.255.patch
new file mode 100644
index 0000000..21becaa
--- /dev/null
+++ b/1254_linux-4.4.255.patch
@@ -0,0 +1,1473 @@
+diff --git a/Makefile b/Makefile
+index 5abb21c7d852e..b18b61e540e92 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,6 +1,6 @@
+ VERSION = 4
+ PATCHLEVEL = 4
+-SUBLEVEL = 254
++SUBLEVEL = 255
+ EXTRAVERSION =
+ NAME = Blurry Fish Butt
+ 
+diff --git a/arch/arm/mach-imx/suspend-imx6.S b/arch/arm/mach-imx/suspend-imx6.S
+index 7d84b617af481..99d2e296082c7 100644
+--- a/arch/arm/mach-imx/suspend-imx6.S
++++ b/arch/arm/mach-imx/suspend-imx6.S
+@@ -73,6 +73,7 @@
+ #define MX6Q_CCM_CCR  0x0
+ 
+       .align 3
++      .arm
+ 
+       .macro  sync_l2_cache
+ 
+diff --git a/arch/x86/kvm/pmu_intel.c b/arch/x86/kvm/pmu_intel.c
+index 822829f005902..04890ac518d04 100644
+--- a/arch/x86/kvm/pmu_intel.c
++++ b/arch/x86/kvm/pmu_intel.c
+@@ -29,7 +29,7 @@ static struct kvm_event_hw_type_mapping intel_arch_events[] = {
+       [4] = { 0x2e, 0x41, PERF_COUNT_HW_CACHE_MISSES },
+       [5] = { 0xc4, 0x00, PERF_COUNT_HW_BRANCH_INSTRUCTIONS },
+       [6] = { 0xc5, 0x00, PERF_COUNT_HW_BRANCH_MISSES },
+-      [7] = { 0x00, 0x30, PERF_COUNT_HW_REF_CPU_CYCLES },
++      [7] = { 0x00, 0x03, PERF_COUNT_HW_REF_CPU_CYCLES },
+ };
+ 
+ /* mapping between fixed pmc index and intel_arch_events array */
+diff --git a/drivers/acpi/device_sysfs.c b/drivers/acpi/device_sysfs.c
+index a899a7abcf638..139ee989b0d0a 100644
+--- a/drivers/acpi/device_sysfs.c
++++ b/drivers/acpi/device_sysfs.c
+@@ -259,20 +259,12 @@ int __acpi_device_uevent_modalias(struct acpi_device *adev,
+       if (add_uevent_var(env, "MODALIAS="))
+               return -ENOMEM;
+ 
+-      len = create_pnp_modalias(adev, &env->buf[env->buflen - 1],
+-                                sizeof(env->buf) - env->buflen);
+-      if (len < 0)
+-              return len;
+-
+-      env->buflen += len;
+-      if (!adev->data.of_compatible)
+-              return 0;
+-
+-      if (len > 0 && add_uevent_var(env, "MODALIAS="))
+-              return -ENOMEM;
+-
+-      len = create_of_modalias(adev, &env->buf[env->buflen - 1],
+-                               sizeof(env->buf) - env->buflen);
++      if (adev->data.of_compatible)
++              len = create_of_modalias(adev, &env->buf[env->buflen - 1],
++                                       sizeof(env->buf) - env->buflen);
++      else
++              len = create_pnp_modalias(adev, &env->buf[env->buflen - 1],
++                                        sizeof(env->buf) - env->buflen);
+       if (len < 0)
+               return len;
+ 
+diff --git a/drivers/infiniband/hw/cxgb4/qp.c b/drivers/infiniband/hw/cxgb4/qp.c
+index 04206c600098f..07579e31168c5 100644
+--- a/drivers/infiniband/hw/cxgb4/qp.c
++++ b/drivers/infiniband/hw/cxgb4/qp.c
+@@ -1898,7 +1898,7 @@ int c4iw_ib_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
+       init_attr->cap.max_send_wr = qhp->attr.sq_num_entries;
+       init_attr->cap.max_recv_wr = qhp->attr.rq_num_entries;
+       init_attr->cap.max_send_sge = qhp->attr.sq_max_sges;
+-      init_attr->cap.max_recv_sge = qhp->attr.sq_max_sges;
++      init_attr->cap.max_recv_sge = qhp->attr.rq_max_sges;
+       init_attr->cap.max_inline_data = T4_MAX_SEND_INLINE;
+       init_attr->sq_sig_type = qhp->sq_sig_all ? IB_SIGNAL_ALL_WR : 0;
+       return 0;
+diff --git a/drivers/iommu/dmar.c b/drivers/iommu/dmar.c
+index 00169c9eb3eed..54f27dd9f156d 100644
+--- a/drivers/iommu/dmar.c
++++ b/drivers/iommu/dmar.c
+@@ -1012,8 +1012,8 @@ static int alloc_iommu(struct dmar_drhd_unit *drhd)
+ {
+       struct intel_iommu *iommu;
+       u32 ver, sts;
+-      int agaw = 0;
+-      int msagaw = 0;
++      int agaw = -1;
++      int msagaw = -1;
+       int err;
+ 
+       if (!drhd->reg_base_addr) {
+@@ -1038,17 +1038,28 @@ static int alloc_iommu(struct dmar_drhd_unit *drhd)
+       }
+ 
+       err = -EINVAL;
+-      agaw = iommu_calculate_agaw(iommu);
+-      if (agaw < 0) {
+-              pr_err("Cannot get a valid agaw for iommu (seq_id = %d)\n",
+-                      iommu->seq_id);
+-              goto err_unmap;
++      if (cap_sagaw(iommu->cap) == 0) {
++              pr_info("%s: No supported address widths. Not attempting DMA translation.\n",
++                      iommu->name);
++              drhd->ignored = 1;
+       }
+-      msagaw = iommu_calculate_max_sagaw(iommu);
+-      if (msagaw < 0) {
+-              pr_err("Cannot get a valid max agaw for iommu (seq_id = %d)\n",
+-                      iommu->seq_id);
+-              goto err_unmap;
++
++      if (!drhd->ignored) {
++              agaw = iommu_calculate_agaw(iommu);
++              if (agaw < 0) {
++                      pr_err("Cannot get a valid agaw for iommu (seq_id = %d)\n",
++                             iommu->seq_id);
++                      drhd->ignored = 1;
++              }
++      }
++      if (!drhd->ignored) {
++              msagaw = iommu_calculate_max_sagaw(iommu);
++              if (msagaw < 0) {
++                      pr_err("Cannot get a valid max agaw for iommu (seq_id = %d)\n",
++                             iommu->seq_id);
++                      drhd->ignored = 1;
++                      agaw = -1;
++              }
+       }
+       iommu->agaw = agaw;
+       iommu->msagaw = msagaw;
+@@ -1076,16 +1087,15 @@ static int alloc_iommu(struct dmar_drhd_unit *drhd)
+       raw_spin_lock_init(&iommu->register_lock);
+ 
+       drhd->iommu = iommu;
++      iommu->drhd = drhd;
+ 
+-      if (intel_iommu_enabled)
++      if (intel_iommu_enabled && !drhd->ignored)
+               iommu->iommu_dev = iommu_device_create(NULL, iommu,
+                                                      intel_iommu_groups,
+                                                      "%s", iommu->name);
+ 
+       return 0;
+ 
+-err_unmap:
+-      unmap_iommu(iommu);
+ error_free_seq_id:
+       dmar_free_seq_id(iommu);
+ error:
+@@ -1095,7 +1105,8 @@ error:
+ 
+ static void free_iommu(struct intel_iommu *iommu)
+ {
+-      iommu_device_destroy(iommu->iommu_dev);
++      if (intel_iommu_enabled && !iommu->drhd->ignored)
++              iommu_device_destroy(iommu->iommu_dev);
+ 
+       if (iommu->irq) {
+               if (iommu->pr_irq) {
+diff --git a/drivers/net/can/dev.c b/drivers/net/can/dev.c
+index 45f15ac6b1015..1a79118b008b1 100644
+--- a/drivers/net/can/dev.c
++++ b/drivers/net/can/dev.c
+@@ -987,7 +987,7 @@ static int can_fill_info(struct sk_buff *skb, const struct net_device *dev)
+ {
+       struct can_priv *priv = netdev_priv(dev);
+       struct can_ctrlmode cm = {.flags = priv->ctrlmode};
+-      struct can_berr_counter bec;
++      struct can_berr_counter bec = { };
+       enum can_state state = priv->state;
+ 
+       if (priv->do_get_state)
+diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
+index 42303f3f1348d..3f18faf99367d 100644
+--- a/drivers/net/usb/qmi_wwan.c
++++ b/drivers/net/usb/qmi_wwan.c
+@@ -934,6 +934,7 @@ static const struct usb_device_id products[] = {
+       {QMI_FIXED_INTF(0x0b3c, 0xc00a, 6)},    /* Olivetti Olicard 160 */
+       {QMI_FIXED_INTF(0x0b3c, 0xc00b, 4)},    /* Olivetti Olicard 500 */
+       {QMI_FIXED_INTF(0x1e2d, 0x0060, 4)},    /* Cinterion PLxx */
++      {QMI_QUIRK_SET_DTR(0x1e2d, 0x006f, 8)}, /* Cinterion PLS83/PLS63 */
+       {QMI_FIXED_INTF(0x1e2d, 0x0053, 4)},    /* Cinterion PHxx,PXxx */
+       {QMI_FIXED_INTF(0x1e2d, 0x0082, 4)},    /* Cinterion PHxx,PXxx (2 RmNet) */
+       {QMI_FIXED_INTF(0x1e2d, 0x0082, 5)},    /* Cinterion PHxx,PXxx (2 RmNet) */
+diff --git a/drivers/net/wireless/mediatek/mt7601u/dma.c b/drivers/net/wireless/mediatek/mt7601u/dma.c
+index 3d0b9324d5bfd..970aaf2ed3a71 100644
+--- a/drivers/net/wireless/mediatek/mt7601u/dma.c
++++ b/drivers/net/wireless/mediatek/mt7601u/dma.c
+@@ -160,8 +160,7 @@ mt7601u_rx_process_entry(struct mt7601u_dev *dev, struct mt7601u_dma_buf_rx *e)
+ 
+       if (new_p) {
+               /* we have one extra ref from the allocator */
+-              __free_pages(e->p, MT_RX_ORDER);
+-
++              put_page(e->p);
+               e->p = new_p;
+       }
+ }
+@@ -318,7 +317,6 @@ static int mt7601u_dma_submit_tx(struct mt7601u_dev *dev,
+       }
+ 
+       e = &q->e[q->end];
+-      e->skb = skb;
+       usb_fill_bulk_urb(e->urb, usb_dev, snd_pipe, skb->data, skb->len,
+                         mt7601u_complete_tx, q);
+       ret = usb_submit_urb(e->urb, GFP_ATOMIC);
+@@ -336,6 +334,7 @@ static int mt7601u_dma_submit_tx(struct mt7601u_dev *dev,
+ 
+       q->end = (q->end + 1) % q->entries;
+       q->used++;
++      e->skb = skb;
+ 
+       if (q->used >= q->entries)
+               ieee80211_stop_queue(dev->hw, skb_get_queue_mapping(skb));
+diff --git a/fs/exec.c b/fs/exec.c
+index 46cc0c072246d..ce111af5784be 100644
+--- a/fs/exec.c
++++ b/fs/exec.c
+@@ -875,7 +875,7 @@ static int exec_mmap(struct mm_struct *mm)
+       /* Notify parent that we're no longer interested in the old VM */
+       tsk = current;
+       old_mm = current->mm;
+-      mm_release(tsk, old_mm);
++      exec_mm_release(tsk, old_mm);
+ 
+       if (old_mm) {
+               sync_mm_rss(old_mm);
+diff --git a/include/linux/compat.h b/include/linux/compat.h
+index a76c9172b2eb0..24dd42910d7c2 100644
+--- a/include/linux/compat.h
++++ b/include/linux/compat.h
+@@ -306,8 +306,6 @@ struct compat_kexec_segment;
+ struct compat_mq_attr;
+ struct compat_msgbuf;
+ 
+-extern void compat_exit_robust_list(struct task_struct *curr);
+-
+ asmlinkage long
+ compat_sys_set_robust_list(struct compat_robust_list_head __user *head,
+                          compat_size_t len);
+diff --git a/include/linux/futex.h b/include/linux/futex.h
+index c015fa91e7cce..0f294ae63c78c 100644
+--- a/include/linux/futex.h
++++ b/include/linux/futex.h
+@@ -1,6 +1,8 @@
+ #ifndef _LINUX_FUTEX_H
+ #define _LINUX_FUTEX_H
+ 
++#include <linux/sched.h>
++
+ #include <uapi/linux/futex.h>
+ 
+ struct inode;
+@@ -11,9 +13,6 @@ union ktime;
+ long do_futex(u32 __user *uaddr, int op, u32 val, union ktime *timeout,
+             u32 __user *uaddr2, u32 val2, u32 val3);
+ 
+-extern int
+-handle_futex_death(u32 __user *uaddr, struct task_struct *curr, int pi);
+-
+ /*
+  * Futexes are matched on equal values of this key.
+  * The key type depends on whether it's a shared or private mapping.
+@@ -56,19 +55,34 @@ union futex_key {
+ #define FUTEX_KEY_INIT (union futex_key) { .both = { .ptr = 0ULL } }
+ 
+ #ifdef CONFIG_FUTEX
+-extern void exit_robust_list(struct task_struct *curr);
+-extern void exit_pi_state_list(struct task_struct *curr);
+-#ifdef CONFIG_HAVE_FUTEX_CMPXCHG
+-#define futex_cmpxchg_enabled 1
+-#else
+-extern int futex_cmpxchg_enabled;
+-#endif
+-#else
+-static inline void exit_robust_list(struct task_struct *curr)
+-{
+-}
+-static inline void exit_pi_state_list(struct task_struct *curr)
++enum {
++      FUTEX_STATE_OK,
++      FUTEX_STATE_EXITING,
++      FUTEX_STATE_DEAD,
++};
++
++static inline void futex_init_task(struct task_struct *tsk)
+ {
++      tsk->robust_list = NULL;
++#ifdef CONFIG_COMPAT
++      tsk->compat_robust_list = NULL;
++#endif
++      INIT_LIST_HEAD(&tsk->pi_state_list);
++      tsk->pi_state_cache = NULL;
++      tsk->futex_state = FUTEX_STATE_OK;
++      mutex_init(&tsk->futex_exit_mutex);
+ }
++
++void futex_exit_recursive(struct task_struct *tsk);
++void futex_exit_release(struct task_struct *tsk);
++void futex_exec_release(struct task_struct *tsk);
++
++long do_futex(u32 __user *uaddr, int op, u32 val, ktime_t *timeout,
++            u32 __user *uaddr2, u32 val2, u32 val3);
++#else
++static inline void futex_init_task(struct task_struct *tsk) { }
++static inline void futex_exit_recursive(struct task_struct *tsk) { }
++static inline void futex_exit_release(struct task_struct *tsk) { }
++static inline void futex_exec_release(struct task_struct *tsk) { }
+ #endif
+ #endif
+diff --git a/include/linux/intel-iommu.h b/include/linux/intel-iommu.h
+index d86ac620f0aac..188bd17689711 100644
+--- a/include/linux/intel-iommu.h
++++ b/include/linux/intel-iommu.h
+@@ -447,6 +447,8 @@ struct intel_iommu {
+       struct device   *iommu_dev; /* IOMMU-sysfs device */
+       int             node;
+       u32             flags;      /* Software defined flags */
++
++      struct dmar_drhd_unit *drhd;
+ };
+ 
+ static inline void __iommu_flush_cache(
+diff --git a/include/linux/sched.h b/include/linux/sched.h
+index df5f53ea2f86c..8c10e97f94fea 100644
+--- a/include/linux/sched.h
++++ b/include/linux/sched.h
+@@ -1704,6 +1704,8 @@ struct task_struct {
+ #endif
+       struct list_head pi_state_list;
+       struct futex_pi_state *pi_state_cache;
++      struct mutex futex_exit_mutex;
++      unsigned int futex_state;
+ #endif
+ #ifdef CONFIG_PERF_EVENTS
+       struct perf_event_context *perf_event_ctxp[perf_nr_task_contexts];
+@@ -2099,7 +2101,6 @@ extern void thread_group_cputime_adjusted(struct task_struct *p, cputime_t *ut,
+  * Per process flags
+  */
+ #define PF_EXITING    0x00000004      /* getting shut down */
+-#define PF_EXITPIDONE 0x00000008      /* pi exit done on shut down */
+ #define PF_VCPU               0x00000010      /* I'm a virtual CPU */
+ #define PF_WQ_WORKER  0x00000020      /* I'm a workqueue worker */
+ #define PF_FORKNOEXEC 0x00000040      /* forked but didn't exec */
+@@ -2647,8 +2648,10 @@ extern struct mm_struct *get_task_mm(struct task_struct *task);
+  * succeeds.
+  */
+ extern struct mm_struct *mm_access(struct task_struct *task, unsigned int mode);
+-/* Remove the current tasks stale references to the old mm_struct */
+-extern void mm_release(struct task_struct *, struct mm_struct *);
++/* Remove the current tasks stale references to the old mm_struct on exit() */
++extern void exit_mm_release(struct task_struct *, struct mm_struct *);
++/* Remove the current tasks stale references to the old mm_struct on exec() */
++extern void exec_mm_release(struct task_struct *, struct mm_struct *);
+ 
+ #ifdef CONFIG_HAVE_COPY_THREAD_TLS
+ extern int copy_thread_tls(unsigned long, unsigned long, unsigned long,
+diff --git a/kernel/Makefile b/kernel/Makefile
+index 53abf008ecb39..a672bece1f499 100644
+--- a/kernel/Makefile
++++ b/kernel/Makefile
+@@ -36,9 +36,6 @@ obj-$(CONFIG_PROFILING) += profile.o
+ obj-$(CONFIG_STACKTRACE) += stacktrace.o
+ obj-y += time/
+ obj-$(CONFIG_FUTEX) += futex.o
+-ifeq ($(CONFIG_COMPAT),y)
+-obj-$(CONFIG_FUTEX) += futex_compat.o
+-endif
+ obj-$(CONFIG_GENERIC_ISA_DMA) += dma.o
+ obj-$(CONFIG_SMP) += smp.o
+ ifneq ($(CONFIG_SMP),y)
+diff --git a/kernel/exit.c b/kernel/exit.c
+index 5c20a32c95392..8d3c268fb1b8d 100644
+--- a/kernel/exit.c
++++ b/kernel/exit.c
+@@ -389,7 +389,7 @@ static void exit_mm(struct task_struct *tsk)
+       struct mm_struct *mm = tsk->mm;
+       struct core_state *core_state;
+ 
+-      mm_release(tsk, mm);
++      exit_mm_release(tsk, mm);
+       if (!mm)
+               return;
+       sync_mm_rss(mm);
+@@ -695,27 +695,12 @@ void do_exit(long code)
+        */
+       if (unlikely(tsk->flags & PF_EXITING)) {
+               pr_alert("Fixing recursive fault but reboot is needed!\n");
+-              /*
+-               * We can do this unlocked here. The futex code uses
+-               * this flag just to verify whether the pi state
+-               * cleanup has been done or not. In the worst case it
+-               * loops once more. We pretend that the cleanup was
+-               * done as there is no way to return. Either the
+-               * OWNER_DIED bit is set by now or we push the blocked
+-               * task into the wait for ever nirwana as well.
+-               */
+-              tsk->flags |= PF_EXITPIDONE;
++              futex_exit_recursive(tsk);
+               set_current_state(TASK_UNINTERRUPTIBLE);
+               schedule();
+       }
+ 
+       exit_signals(tsk);  /* sets PF_EXITING */
+-      /*
+-       * tsk->flags are checked in the futex code to protect against
+-       * an exiting task cleaning up the robust pi futexes.
+-       */
+-      smp_mb();
+-      raw_spin_unlock_wait(&tsk->pi_lock);
+ 
+       if (unlikely(in_atomic())) {
+               pr_info("note: %s[%d] exited with preempt_count %d\n",
+@@ -793,12 +778,6 @@ void do_exit(long code)
+        * Make sure we are holding no locks:
+        */
+       debug_check_no_locks_held();
+-      /*
+-       * We can do this unlocked here. The futex code uses this flag
+-       * just to verify whether the pi state cleanup has been done
+-       * or not. In the worst case it loops once more.
+-       */
+-      tsk->flags |= PF_EXITPIDONE;
+ 
+       if (tsk->io_context)
+               exit_io_context(tsk);
+diff --git a/kernel/fork.c b/kernel/fork.c
+index 5d35be1e0913b..2bd4c38efa095 100644
+--- a/kernel/fork.c
++++ b/kernel/fork.c
+@@ -887,24 +887,8 @@ static int wait_for_vfork_done(struct task_struct *child,
+  * restoring the old one. . .
+  * Eric Biederman 10 January 1998
+  */
+-void mm_release(struct task_struct *tsk, struct mm_struct *mm)
++static void mm_release(struct task_struct *tsk, struct mm_struct *mm)
+ {
+-      /* Get rid of any futexes when releasing the mm */
+-#ifdef CONFIG_FUTEX
+-      if (unlikely(tsk->robust_list)) {
+-              exit_robust_list(tsk);
+-              tsk->robust_list = NULL;
+-      }
+-#ifdef CONFIG_COMPAT
+-      if (unlikely(tsk->compat_robust_list)) {
+-              compat_exit_robust_list(tsk);
+-              tsk->compat_robust_list = NULL;
+-      }
+-#endif
+-      if (unlikely(!list_empty(&tsk->pi_state_list)))
+-              exit_pi_state_list(tsk);
+-#endif
+-
+       uprobe_free_utask(tsk);
+ 
+       /* Get rid of any cached register state */
+@@ -937,6 +921,18 @@ void mm_release(struct task_struct *tsk, struct mm_struct *mm)
+               complete_vfork_done(tsk);
+ }
+ 
++void exit_mm_release(struct task_struct *tsk, struct mm_struct *mm)
++{
++      futex_exit_release(tsk);
++      mm_release(tsk, mm);
++}
++
++void exec_mm_release(struct task_struct *tsk, struct mm_struct *mm)
++{
++      futex_exec_release(tsk);
++      mm_release(tsk, mm);
++}
++
+ /*
+  * Allocate a new mm structure and copy contents from the
+  * mm structure of the passed in task structure.
+@@ -1511,14 +1507,8 @@ static struct task_struct *copy_process(unsigned long clone_flags,
+ #ifdef CONFIG_BLOCK
+       p->plug = NULL;
+ #endif
+-#ifdef CONFIG_FUTEX
+-      p->robust_list = NULL;
+-#ifdef CONFIG_COMPAT
+-      p->compat_robust_list = NULL;
+-#endif
+-      INIT_LIST_HEAD(&p->pi_state_list);
+-      p->pi_state_cache = NULL;
+-#endif
++      futex_init_task(p);
++
+       /*
+        * sigaltstack should be cleared when sharing the same VM
+        */
+diff --git a/kernel/futex.c b/kernel/futex.c
+index e50b67674ba25..f1990e2a51e5a 100644
+--- a/kernel/futex.c
++++ b/kernel/futex.c
+@@ -44,6 +44,7 @@
+  *  along with this program; if not, write to the Free Software
+  *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+  */
++#include <linux/compat.h>
+ #include <linux/slab.h>
+ #include <linux/poll.h>
+ #include <linux/fs.h>
+@@ -171,8 +172,10 @@
+  * double_lock_hb() and double_unlock_hb(), respectively.
+  */
+ 
+-#ifndef CONFIG_HAVE_FUTEX_CMPXCHG
+-int __read_mostly futex_cmpxchg_enabled;
++#ifdef CONFIG_HAVE_FUTEX_CMPXCHG
++#define futex_cmpxchg_enabled 1
++#else
++static int  __read_mostly futex_cmpxchg_enabled;
+ #endif
+ 
+ /*
+@@ -328,6 +331,12 @@ static inline bool should_fail_futex(bool fshared)
+ }
+ #endif /* CONFIG_FAIL_FUTEX */
+ 
++#ifdef CONFIG_COMPAT
++static void compat_exit_robust_list(struct task_struct *curr);
++#else
++static inline void compat_exit_robust_list(struct task_struct *curr) { }
++#endif
++
+ static inline void futex_get_mm(union futex_key *key)
+ {
+       atomic_inc(&key->private.mm->mm_count);
+@@ -886,7 +895,7 @@ static struct task_struct * futex_find_get_task(pid_t pid)
+  * Kernel cleans up PI-state, but userspace is likely hosed.
+  * (Robust-futex cleanup is separate and might save the day for userspace.)
+  */
+-void exit_pi_state_list(struct task_struct *curr)
++static void exit_pi_state_list(struct task_struct *curr)
+ {
+       struct list_head *next, *head = &curr->pi_state_list;
+       struct futex_pi_state *pi_state;
+@@ -1058,12 +1067,43 @@ out_state:
+       return 0;
+ }
+ 
++/**
++ * wait_for_owner_exiting - Block until the owner has exited
++ * @exiting:  Pointer to the exiting task
++ *
++ * Caller must hold a refcount on @exiting.
++ */
++static void wait_for_owner_exiting(int ret, struct task_struct *exiting)
++{
++      if (ret != -EBUSY) {
++              WARN_ON_ONCE(exiting);
++              return;
++      }
++
++      if (WARN_ON_ONCE(ret == -EBUSY && !exiting))
++              return;
++
++      mutex_lock(&exiting->futex_exit_mutex);
++      /*
++       * No point in doing state checking here. If the waiter got here
++       * while the task was in exec()->exec_futex_release() then it can
++       * have any FUTEX_STATE_* value when the waiter has acquired the
++       * mutex. OK, if running, EXITING or DEAD if it reached exit()
++       * already. Highly unlikely and not a problem. Just one more round
++       * through the futex maze.
++       */
++      mutex_unlock(&exiting->futex_exit_mutex);
++
++      put_task_struct(exiting);
++}
++
+ /*
+  * Lookup the task for the TID provided from user space and attach to
+  * it after doing proper sanity checks.
+  */
+ static int attach_to_pi_owner(u32 uval, union futex_key *key,
+-                            struct futex_pi_state **ps)
++                            struct futex_pi_state **ps,
++                            struct task_struct **exiting)
+ {
+       pid_t pid = uval & FUTEX_TID_MASK;
+       struct futex_pi_state *pi_state;
+@@ -1085,22 +1125,33 @@ static int attach_to_pi_owner(u32 uval, union futex_key *key,
+       }
+ 
+       /*
+-       * We need to look at the task state flags to figure out,
+-       * whether the task is exiting. To protect against the do_exit
+-       * change of the task flags, we do this protected by
+-       * p->pi_lock:
++       * We need to look at the task state to figure out, whether the
++       * task is exiting. To protect against the change of the task state
++       * in futex_exit_release(), we do this protected by p->pi_lock:
+        */
+       raw_spin_lock_irq(&p->pi_lock);
+-      if (unlikely(p->flags & PF_EXITING)) {
++      if (unlikely(p->futex_state != FUTEX_STATE_OK)) {
+               /*
+-               * The task is on the way out. When PF_EXITPIDONE is
+-               * set, we know that the task has finished the
+-               * cleanup:
++               * The task is on the way out. When the futex state is
++               * FUTEX_STATE_DEAD, we know that the task has finished
++               * the cleanup:
+                */
+-              int ret = (p->flags & PF_EXITPIDONE) ? -ESRCH : -EAGAIN;
++              int ret = (p->futex_state == FUTEX_STATE_DEAD) ? -ESRCH : -EAGAIN;
+ 
+               raw_spin_unlock_irq(&p->pi_lock);
+-              put_task_struct(p);
++              /*
++               * If the owner task is between FUTEX_STATE_EXITING and
++               * FUTEX_STATE_DEAD then store the task pointer and keep
++               * the reference on the task struct. The calling code will
++               * drop all locks, wait for the task to reach
++               * FUTEX_STATE_DEAD and then drop the refcount. This is
++               * required to prevent a live lock when the current task
++               * preempted the exiting task between the two states.
++               */
++              if (ret == -EBUSY)
++                      *exiting = p;
++              else
++                      put_task_struct(p);
+               return ret;
+       }
+ 
+@@ -1131,7 +1182,8 @@ static int attach_to_pi_owner(u32 uval, union futex_key *key,
+ }
+ 
+ static int lookup_pi_state(u32 uval, struct futex_hash_bucket *hb,
+-                         union futex_key *key, struct futex_pi_state **ps)
++                         union futex_key *key, struct futex_pi_state **ps,
++                         struct task_struct **exiting)
+ {
+       struct futex_q *match = futex_top_waiter(hb, key);
+ 
+@@ -1146,7 +1198,7 @@ static int lookup_pi_state(u32 uval, struct futex_hash_bucket *hb,
+        * We are the first waiter - try to look up the owner based on
+        * @uval and attach to it.
+        */
+-      return attach_to_pi_owner(uval, key, ps);
++      return attach_to_pi_owner(uval, key, ps, exiting);
+ }
+ 
+ static int lock_pi_update_atomic(u32 __user *uaddr, u32 uval, u32 newval)
+@@ -1172,6 +1224,8 @@ static int lock_pi_update_atomic(u32 __user *uaddr, u32 uval, u32 newval)
+  *                    lookup
+  * @task:             the task to perform the atomic lock work for.  This will
+  *                    be "current" except in the case of requeue pi.
++ * @exiting:          Pointer to store the task pointer of the owner task
++ *                    which is in the middle of exiting
+  * @set_waiters:      force setting the FUTEX_WAITERS bit (1) or not (0)
+  *
+  * Return:
+@@ -1180,11 +1234,17 @@ static int lock_pi_update_atomic(u32 __user *uaddr, u32 uval, u32 newval)
+  * <0 - error
+  *
+  * The hb->lock and futex_key refs shall be held by the caller.
++ *
++ * @exiting is only set when the return value is -EBUSY. If so, this holds
++ * a refcount on the exiting task on return and the caller needs to drop it
++ * after waiting for the exit to complete.
+  */
+ static int futex_lock_pi_atomic(u32 __user *uaddr, struct futex_hash_bucket *hb,
+                               union futex_key *key,
+                               struct futex_pi_state **ps,
+-                              struct task_struct *task, int set_waiters)
++                              struct task_struct *task,
++                              struct task_struct **exiting,
++                              int set_waiters)
+ {
+       u32 uval, newval, vpid = task_pid_vnr(task);
+       struct futex_q *match;
+@@ -1254,7 +1314,7 @@ static int futex_lock_pi_atomic(u32 __user *uaddr, struct futex_hash_bucket *hb,
+        * attach to the owner. If that fails, no harm done, we only
+        * set the FUTEX_WAITERS bit in the user space variable.
+        */
+-      return attach_to_pi_owner(uval, key, ps);
++      return attach_to_pi_owner(uval, key, ps, exiting);
+ }
+ 
+ /**
+@@ -1680,6 +1740,8 @@ void requeue_pi_wake_futex(struct futex_q *q, union futex_key *key,
+  * @key1:             the from futex key
+  * @key2:             the to futex key
+  * @ps:                       address to store the pi_state pointer
++ * @exiting:          Pointer to store the task pointer of the owner task
++ *                    which is in the middle of exiting
+  * @set_waiters:      force setting the FUTEX_WAITERS bit (1) or not (0)
+  *
+  * Try and get the lock on behalf of the top waiter if we can do it atomically.
+@@ -1687,16 +1749,20 @@ void requeue_pi_wake_futex(struct futex_q *q, union futex_key *key,
+  * then direct futex_lock_pi_atomic() to force setting the FUTEX_WAITERS bit.
+  * hb1 and hb2 must be held by the caller.
+  *
++ * @exiting is only set when the return value is -EBUSY. If so, this holds
++ * a refcount on the exiting task on return and the caller needs to drop it
++ * after waiting for the exit to complete.
++ *
+  * Return:
+  *  0 - failed to acquire the lock atomically;
+  * >0 - acquired the lock, return value is vpid of the top_waiter
+  * <0 - error
+  */
+-static int futex_proxy_trylock_atomic(u32 __user *pifutex,
+-                               struct futex_hash_bucket *hb1,
+-                               struct futex_hash_bucket *hb2,
+-                               union futex_key *key1, union futex_key *key2,
+-                               struct futex_pi_state **ps, int set_waiters)
++static int
++futex_proxy_trylock_atomic(u32 __user *pifutex, struct futex_hash_bucket *hb1,
++                         struct futex_hash_bucket *hb2, union futex_key *key1,
++                         union futex_key *key2, struct futex_pi_state **ps,
++                         struct task_struct **exiting, int set_waiters)
+ {
+       struct futex_q *top_waiter = NULL;
+       u32 curval;
+@@ -1733,7 +1799,7 @@ static int futex_proxy_trylock_atomic(u32 __user *pifutex,
+        */
+       vpid = task_pid_vnr(top_waiter->task);
+       ret = futex_lock_pi_atomic(pifutex, hb2, key2, ps, top_waiter->task,
+-                                 set_waiters);
++                                 exiting, set_waiters);
+       if (ret == 1) {
+               requeue_pi_wake_futex(top_waiter, key2, hb2);
+               return vpid;
+@@ -1853,6 +1919,8 @@ retry_private:
+       }
+ 
+       if (requeue_pi && (task_count - nr_wake < nr_requeue)) {
++              struct task_struct *exiting = NULL;
++
+               /*
+                * Attempt to acquire uaddr2 and wake the top waiter. If we
+                * intend to requeue waiters, force setting the FUTEX_WAITERS
+@@ -1860,7 +1928,8 @@ retry_private:
+                * faults rather in the requeue loop below.
+                */
+               ret = futex_proxy_trylock_atomic(uaddr2, hb1, hb2, &key1,
+-                                               &key2, &pi_state, nr_requeue);
++                                               &key2, &pi_state,
++                                               &exiting, nr_requeue);
+ 
+               /*
+                * At this point the top_waiter has either taken uaddr2 or is
+@@ -1884,7 +1953,8 @@ retry_private:
+                        * rereading and handing potential crap to
+                        * lookup_pi_state.
+                        */
+-                      ret = lookup_pi_state(ret, hb2, &key2, &pi_state);
++                      ret = lookup_pi_state(ret, hb2, &key2,
++                                            &pi_state, &exiting);
+               }
+ 
+               switch (ret) {
+@@ -1901,12 +1971,13 @@ retry_private:
+                       if (!ret)
+                               goto retry;
+                       goto out;
++              case -EBUSY:
+               case -EAGAIN:
+                       /*
+                        * Two reasons for this:
+-                       * - Owner is exiting and we just wait for the
++                       * - EBUSY: Owner is exiting and we just wait for the
+                        *   exit to complete.
+-                       * - The user space value changed.
++                       * - EAGAIN: The user space value changed.
+                        */
+                       free_pi_state(pi_state);
+                       pi_state = NULL;
+@@ -1914,6 +1985,12 @@ retry_private:
+                       hb_waiters_dec(hb2);
+                       put_futex_key(&key2);
+                       put_futex_key(&key1);
++                      /*
++                       * Handle the case where the owner is in the middle of
++                       * exiting. Wait for the exit to complete otherwise
++                       * this task might loop forever, aka. live lock.
++                       */
++                      wait_for_owner_exiting(ret, exiting);
+                       cond_resched();
+                       goto retry;
+               default:
+@@ -2536,6 +2613,7 @@ static int futex_lock_pi(u32 __user *uaddr, unsigned int flags,
+                        ktime_t *time, int trylock)
+ {
+       struct hrtimer_sleeper timeout, *to = NULL;
++      struct task_struct *exiting = NULL;
+       struct futex_hash_bucket *hb;
+       struct futex_q q = futex_q_init;
+       int res, ret;
+@@ -2559,7 +2637,8 @@ retry:
+ retry_private:
+       hb = queue_lock(&q);
+ 
+-      ret = futex_lock_pi_atomic(uaddr, hb, &q.key, &q.pi_state, current, 0);
++      ret = futex_lock_pi_atomic(uaddr, hb, &q.key, &q.pi_state, current,
++                                 &exiting, 0);
+       if (unlikely(ret)) {
+               /*
+                * Atomic work succeeded and we got the lock,
+@@ -2572,15 +2651,22 @@ retry_private:
+                       goto out_unlock_put_key;
+               case -EFAULT:
+                       goto uaddr_faulted;
++              case -EBUSY:
+               case -EAGAIN:
+                       /*
+                        * Two reasons for this:
+-                       * - Task is exiting and we just wait for the
++                       * - EBUSY: Task is exiting and we just wait for the
+                        *   exit to complete.
+-                       * - The user space value changed.
++                       * - EAGAIN: The user space value changed.
+                        */
+                       queue_unlock(hb);
+                       put_futex_key(&q.key);
++                      /*
++                       * Handle the case where the owner is in the middle of
++                       * exiting. Wait for the exit to complete otherwise
++                       * this task might loop forever, aka. live lock.
++                       */
++                      wait_for_owner_exiting(ret, exiting);
+                       cond_resched();
+                       goto retry;
+               default:
+@@ -3088,7 +3174,7 @@ err_unlock:
+  * Process a futex-list entry, check whether it's owned by the
+  * dying task, and do notification if so:
+  */
+-int handle_futex_death(u32 __user *uaddr, struct task_struct *curr, int pi)
++static int handle_futex_death(u32 __user *uaddr, struct task_struct *curr, int pi)
+ {
+       u32 uval, uninitialized_var(nval), mval;
+ 
+@@ -3163,7 +3249,7 @@ static inline int fetch_robust_entry(struct robust_list __user **entry,
+  *
+  * We silently return on any sign of list-walking problem.
+  */
+-void exit_robust_list(struct task_struct *curr)
++static void exit_robust_list(struct task_struct *curr)
+ {
+       struct robust_list_head __user *head = curr->robust_list;
+       struct robust_list __user *entry, *next_entry, *pending;
+@@ -3226,6 +3312,114 @@ void exit_robust_list(struct task_struct *curr)
+                                  curr, pip);
+ }
+ 
++static void futex_cleanup(struct task_struct *tsk)
++{
++      if (unlikely(tsk->robust_list)) {
++              exit_robust_list(tsk);
++              tsk->robust_list = NULL;
++      }
++
++#ifdef CONFIG_COMPAT
++      if (unlikely(tsk->compat_robust_list)) {
++              compat_exit_robust_list(tsk);
++              tsk->compat_robust_list = NULL;
++      }
++#endif
++
++      if (unlikely(!list_empty(&tsk->pi_state_list)))
++              exit_pi_state_list(tsk);
++}
++
++/**
++ * futex_exit_recursive - Set the tasks futex state to FUTEX_STATE_DEAD
++ * @tsk:      task to set the state on
++ *
++ * Set the futex exit state of the task lockless. The futex waiter code
++ * observes that state when a task is exiting and loops until the task has
++ * actually finished the futex cleanup. The worst case for this is that the
++ * waiter runs through the wait loop until the state becomes visible.
++ *
++ * This is called from the recursive fault handling path in do_exit().
++ *
++ * This is best effort. Either the futex exit code has run already or
++ * not. If the OWNER_DIED bit has been set on the futex then the waiter can
++ * take it over. If not, the problem is pushed back to user space. If the
++ * futex exit code did not run yet, then an already queued waiter might
++ * block forever, but there is nothing which can be done about that.
++ */
++void futex_exit_recursive(struct task_struct *tsk)
++{
++      /* If the state is FUTEX_STATE_EXITING then futex_exit_mutex is held */
++      if (tsk->futex_state == FUTEX_STATE_EXITING)
++              mutex_unlock(&tsk->futex_exit_mutex);
++      tsk->futex_state = FUTEX_STATE_DEAD;
++}
++
++static void futex_cleanup_begin(struct task_struct *tsk)
++{
++      /*
++       * Prevent various race issues against a concurrent incoming waiter
++       * including live locks by forcing the waiter to block on
++       * tsk->futex_exit_mutex when it observes FUTEX_STATE_EXITING in
++       * attach_to_pi_owner().
++       */
++      mutex_lock(&tsk->futex_exit_mutex);
++
++      /*
++       * Switch the state to FUTEX_STATE_EXITING under tsk->pi_lock.
++       *
++       * This ensures that all subsequent checks of tsk->futex_state in
++       * attach_to_pi_owner() must observe FUTEX_STATE_EXITING with
++       * tsk->pi_lock held.
++       *
++       * It guarantees also that a pi_state which was queued right before
++       * the state change under tsk->pi_lock by a concurrent waiter must
++       * be observed in exit_pi_state_list().
++       */
++      raw_spin_lock_irq(&tsk->pi_lock);
++      tsk->futex_state = FUTEX_STATE_EXITING;
++      raw_spin_unlock_irq(&tsk->pi_lock);
++}
++
++static void futex_cleanup_end(struct task_struct *tsk, int state)
++{
++      /*
++       * Lockless store. The only side effect is that an observer might
++       * take another loop until it becomes visible.
++       */
++      tsk->futex_state = state;
++      /*
++       * Drop the exit protection. This unblocks waiters which observed
++       * FUTEX_STATE_EXITING to reevaluate the state.
++       */
++      mutex_unlock(&tsk->futex_exit_mutex);
++}
++
++void futex_exec_release(struct task_struct *tsk)
++{
++      /*
++       * The state handling is done for consistency, but in the case of
++       * exec() there is no way to prevent futher damage as the PID stays
++       * the same. But for the unlikely and arguably buggy case that a
++       * futex is held on exec(), this provides at least as much state
++       * consistency protection which is possible.
++       */
++      futex_cleanup_begin(tsk);
++      futex_cleanup(tsk);
++      /*
++       * Reset the state to FUTEX_STATE_OK. The task is alive and about
++       * exec a new binary.
++       */
++      futex_cleanup_end(tsk, FUTEX_STATE_OK);
++}
++
++void futex_exit_release(struct task_struct *tsk)
++{
++      futex_cleanup_begin(tsk);
++      futex_cleanup(tsk);
++      futex_cleanup_end(tsk, FUTEX_STATE_DEAD);
++}
++
+ long do_futex(u32 __user *uaddr, int op, u32 val, ktime_t *timeout,
+               u32 __user *uaddr2, u32 val2, u32 val3)
+ {
+@@ -3318,6 +3512,192 @@ SYSCALL_DEFINE6(futex, u32 __user *, uaddr, int, op, u32, val,
+       return do_futex(uaddr, op, val, tp, uaddr2, val2, val3);
+ }
+ 
++#ifdef CONFIG_COMPAT
++/*
++ * Fetch a robust-list pointer. Bit 0 signals PI futexes:
++ */
++static inline int
++compat_fetch_robust_entry(compat_uptr_t *uentry, struct robust_list __user **entry,
++                 compat_uptr_t __user *head, unsigned int *pi)
++{
++      if (get_user(*uentry, head))
++              return -EFAULT;
++
++      *entry = compat_ptr((*uentry) & ~1);
++      *pi = (unsigned int)(*uentry) & 1;
++
++      return 0;
++}
++
++static void __user *futex_uaddr(struct robust_list __user *entry,
++                              compat_long_t futex_offset)
++{
++      compat_uptr_t base = ptr_to_compat(entry);
++      void __user *uaddr = compat_ptr(base + futex_offset);
++
++      return uaddr;
++}
++
++/*
++ * Walk curr->robust_list (very carefully, it's a userspace list!)
++ * and mark any locks found there dead, and notify any waiters.
++ *
++ * We silently return on any sign of list-walking problem.
++ */
++void compat_exit_robust_list(struct task_struct *curr)
++{
++      struct compat_robust_list_head __user *head = curr->compat_robust_list;
++      struct robust_list __user *entry, *next_entry, *pending;
++      unsigned int limit = ROBUST_LIST_LIMIT, pi, pip;
++      unsigned int uninitialized_var(next_pi);
++      compat_uptr_t uentry, next_uentry, upending;
++      compat_long_t futex_offset;
++      int rc;
++
++      if (!futex_cmpxchg_enabled)
++              return;
++
++      /*
++       * Fetch the list head (which was registered earlier, via
++       * sys_set_robust_list()):
++       */
++      if (compat_fetch_robust_entry(&uentry, &entry, &head->list.next, &pi))
++              return;
++      /*
++       * Fetch the relative futex offset:
++       */
++      if (get_user(futex_offset, &head->futex_offset))
++              return;
++      /*
++       * Fetch any possibly pending lock-add first, and handle it
++       * if it exists:
++       */
++      if (compat_fetch_robust_entry(&upending, &pending,
++                             &head->list_op_pending, &pip))
++              return;
++
++      next_entry = NULL;      /* avoid warning with gcc */
++      while (entry != (struct robust_list __user *) &head->list) {
++              /*
++               * Fetch the next entry in the list before calling
++               * handle_futex_death:
++               */
++              rc = compat_fetch_robust_entry(&next_uentry, &next_entry,
++                      (compat_uptr_t __user *)&entry->next, &next_pi);
++              /*
++               * A pending lock might already be on the list, so
++               * dont process it twice:
++               */
++              if (entry != pending) {
++                      void __user *uaddr = futex_uaddr(entry, futex_offset);
++
++                      if (handle_futex_death(uaddr, curr, pi))
++                              return;
++              }
++              if (rc)
++                      return;
++              uentry = next_uentry;
++              entry = next_entry;
++              pi = next_pi;
++              /*
++               * Avoid excessively long or circular lists:
++               */
++              if (!--limit)
++                      break;
++
++              cond_resched();
++      }
++      if (pending) {
++              void __user *uaddr = futex_uaddr(pending, futex_offset);
++
++              handle_futex_death(uaddr, curr, pip);
++      }
++}
++
++COMPAT_SYSCALL_DEFINE2(set_robust_list,
++              struct compat_robust_list_head __user *, head,
++              compat_size_t, len)
++{
++      if (!futex_cmpxchg_enabled)
++              return -ENOSYS;
++
++      if (unlikely(len != sizeof(*head)))
++              return -EINVAL;
++
++      current->compat_robust_list = head;
++
++      return 0;
++}
++
++COMPAT_SYSCALL_DEFINE3(get_robust_list, int, pid,
++                      compat_uptr_t __user *, head_ptr,
++                      compat_size_t __user *, len_ptr)
++{
++      struct compat_robust_list_head __user *head;
++      unsigned long ret;
++      struct task_struct *p;
++
++      if (!futex_cmpxchg_enabled)
++              return -ENOSYS;
++
++      rcu_read_lock();
++
++      ret = -ESRCH;
++      if (!pid)
++              p = current;
++      else {
++              p = find_task_by_vpid(pid);
++              if (!p)
++                      goto err_unlock;
++      }
++
++      ret = -EPERM;
++      if (!ptrace_may_access(p, PTRACE_MODE_READ_REALCREDS))
++              goto err_unlock;
++
++      head = p->compat_robust_list;
++      rcu_read_unlock();
++
++      if (put_user(sizeof(*head), len_ptr))
++              return -EFAULT;
++      return put_user(ptr_to_compat(head), head_ptr);
++
++err_unlock:
++      rcu_read_unlock();
++
++      return ret;
++}
++
++COMPAT_SYSCALL_DEFINE6(futex, u32 __user *, uaddr, int, op, u32, val,
++              struct compat_timespec __user *, utime, u32 __user *, uaddr2,
++              u32, val3)
++{
++      struct timespec ts;
++      ktime_t t, *tp = NULL;
++      int val2 = 0;
++      int cmd = op & FUTEX_CMD_MASK;
++
++      if (utime && (cmd == FUTEX_WAIT || cmd == FUTEX_LOCK_PI ||
++                    cmd == FUTEX_WAIT_BITSET ||
++                    cmd == FUTEX_WAIT_REQUEUE_PI)) {
++              if (compat_get_timespec(&ts, utime))
++                      return -EFAULT;
++              if (!timespec_valid(&ts))
++                      return -EINVAL;
++
++              t = timespec_to_ktime(ts);
++              if (cmd == FUTEX_WAIT)
++                      t = ktime_add_safe(ktime_get(), t);
++              tp = &t;
++      }
++      if (cmd == FUTEX_REQUEUE || cmd == FUTEX_CMP_REQUEUE ||
++          cmd == FUTEX_CMP_REQUEUE_PI || cmd == FUTEX_WAKE_OP)
++              val2 = (int) (unsigned long) utime;
++
++      return do_futex(uaddr, op, val, tp, uaddr2, val2, val3);
++}
++#endif /* CONFIG_COMPAT */
++
+ static void __init futex_detect_cmpxchg(void)
+ {
+ #ifndef CONFIG_HAVE_FUTEX_CMPXCHG
+diff --git a/kernel/futex_compat.c b/kernel/futex_compat.c
+deleted file mode 100644
+index 4ae3232e7a28a..0000000000000
+--- a/kernel/futex_compat.c
++++ /dev/null
+@@ -1,201 +0,0 @@
+-/*
+- * linux/kernel/futex_compat.c
+- *
+- * Futex compatibililty routines.
+- *
+- * Copyright 2006, Red Hat, Inc., Ingo Molnar
+- */
+-
+-#include <linux/linkage.h>
+-#include <linux/compat.h>
+-#include <linux/nsproxy.h>
+-#include <linux/futex.h>
+-#include <linux/ptrace.h>
+-#include <linux/syscalls.h>
+-
+-#include <asm/uaccess.h>
+-
+-
+-/*
+- * Fetch a robust-list pointer. Bit 0 signals PI futexes:
+- */
+-static inline int
+-fetch_robust_entry(compat_uptr_t *uentry, struct robust_list __user **entry,
+-                 compat_uptr_t __user *head, unsigned int *pi)
+-{
+-      if (get_user(*uentry, head))
+-              return -EFAULT;
+-
+-      *entry = compat_ptr((*uentry) & ~1);
+-      *pi = (unsigned int)(*uentry) & 1;
+-
+-      return 0;
+-}
+-
+-static void __user *futex_uaddr(struct robust_list __user *entry,
+-                              compat_long_t futex_offset)
+-{
+-      compat_uptr_t base = ptr_to_compat(entry);
+-      void __user *uaddr = compat_ptr(base + futex_offset);
+-
+-      return uaddr;
+-}
+-
+-/*
+- * Walk curr->robust_list (very carefully, it's a userspace list!)
+- * and mark any locks found there dead, and notify any waiters.
+- *
+- * We silently return on any sign of list-walking problem.
+- */
+-void compat_exit_robust_list(struct task_struct *curr)
+-{
+-      struct compat_robust_list_head __user *head = curr->compat_robust_list;
+-      struct robust_list __user *entry, *next_entry, *pending;
+-      unsigned int limit = ROBUST_LIST_LIMIT, pi, pip;
+-      unsigned int uninitialized_var(next_pi);
+-      compat_uptr_t uentry, next_uentry, upending;
+-      compat_long_t futex_offset;
+-      int rc;
+-
+-      if (!futex_cmpxchg_enabled)
+-              return;
+-
+-      /*
+-       * Fetch the list head (which was registered earlier, via
+-       * sys_set_robust_list()):
+-       */
+-      if (fetch_robust_entry(&uentry, &entry, &head->list.next, &pi))
+-              return;
+-      /*
+-       * Fetch the relative futex offset:
+-       */
+-      if (get_user(futex_offset, &head->futex_offset))
+-              return;
+-      /*
+-       * Fetch any possibly pending lock-add first, and handle it
+-       * if it exists:
+-       */
+-      if (fetch_robust_entry(&upending, &pending,
+-                             &head->list_op_pending, &pip))
+-              return;
+-
+-      next_entry = NULL;      /* avoid warning with gcc */
+-      while (entry != (struct robust_list __user *) &head->list) {
+-              /*
+-               * Fetch the next entry in the list before calling
+-               * handle_futex_death:
+-               */
+-              rc = fetch_robust_entry(&next_uentry, &next_entry,
+-                      (compat_uptr_t __user *)&entry->next, &next_pi);
+-              /*
+-               * A pending lock might already be on the list, so
+-               * dont process it twice:
+-               */
+-              if (entry != pending) {
+-                      void __user *uaddr = futex_uaddr(entry, futex_offset);
+-
+-                      if (handle_futex_death(uaddr, curr, pi))
+-                              return;
+-              }
+-              if (rc)
+-                      return;
+-              uentry = next_uentry;
+-              entry = next_entry;
+-              pi = next_pi;
+-              /*
+-               * Avoid excessively long or circular lists:
+-               */
+-              if (!--limit)
+-                      break;
+-
+-              cond_resched();
+-      }
+-      if (pending) {
+-              void __user *uaddr = futex_uaddr(pending, futex_offset);
+-
+-              handle_futex_death(uaddr, curr, pip);
+-      }
+-}
+-
+-COMPAT_SYSCALL_DEFINE2(set_robust_list,
+-              struct compat_robust_list_head __user *, head,
+-              compat_size_t, len)
+-{
+-      if (!futex_cmpxchg_enabled)
+-              return -ENOSYS;
+-
+-      if (unlikely(len != sizeof(*head)))
+-              return -EINVAL;
+-
+-      current->compat_robust_list = head;
+-
+-      return 0;
+-}
+-
+-COMPAT_SYSCALL_DEFINE3(get_robust_list, int, pid,
+-                      compat_uptr_t __user *, head_ptr,
+-                      compat_size_t __user *, len_ptr)
+-{
+-      struct compat_robust_list_head __user *head;
+-      unsigned long ret;
+-      struct task_struct *p;
+-
+-      if (!futex_cmpxchg_enabled)
+-              return -ENOSYS;
+-
+-      rcu_read_lock();
+-
+-      ret = -ESRCH;
+-      if (!pid)
+-              p = current;
+-      else {
+-              p = find_task_by_vpid(pid);
+-              if (!p)
+-                      goto err_unlock;
+-      }
+-
+-      ret = -EPERM;
+-      if (!ptrace_may_access(p, PTRACE_MODE_READ_REALCREDS))
+-              goto err_unlock;
+-
+-      head = p->compat_robust_list;
+-      rcu_read_unlock();
+-
+-      if (put_user(sizeof(*head), len_ptr))
+-              return -EFAULT;
+-      return put_user(ptr_to_compat(head), head_ptr);
+-
+-err_unlock:
+-      rcu_read_unlock();
+-
+-      return ret;
+-}
+-
+-COMPAT_SYSCALL_DEFINE6(futex, u32 __user *, uaddr, int, op, u32, val,
+-              struct compat_timespec __user *, utime, u32 __user *, uaddr2,
+-              u32, val3)
+-{
+-      struct timespec ts;
+-      ktime_t t, *tp = NULL;
+-      int val2 = 0;
+-      int cmd = op & FUTEX_CMD_MASK;
+-
+-      if (utime && (cmd == FUTEX_WAIT || cmd == FUTEX_LOCK_PI ||
+-                    cmd == FUTEX_WAIT_BITSET ||
+-                    cmd == FUTEX_WAIT_REQUEUE_PI)) {
+-              if (compat_get_timespec(&ts, utime))
+-                      return -EFAULT;
+-              if (!timespec_valid(&ts))
+-                      return -EINVAL;
+-
+-              t = timespec_to_ktime(ts);
+-              if (cmd == FUTEX_WAIT)
+-                      t = ktime_add_safe(ktime_get(), t);
+-              tp = &t;
+-      }
+-      if (cmd == FUTEX_REQUEUE || cmd == FUTEX_CMP_REQUEUE ||
+-          cmd == FUTEX_CMP_REQUEUE_PI || cmd == FUTEX_WAKE_OP)
+-              val2 = (int) (unsigned long) utime;
+-
+-      return do_futex(uaddr, op, val, tp, uaddr2, val2, val3);
+-}
+diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h
+index a991d1df6774d..1046520d726d8 100644
+--- a/net/mac80211/ieee80211_i.h
++++ b/net/mac80211/ieee80211_i.h
+@@ -1027,6 +1027,7 @@ enum queue_stop_reason {
+       IEEE80211_QUEUE_STOP_REASON_FLUSH,
+       IEEE80211_QUEUE_STOP_REASON_TDLS_TEARDOWN,
+       IEEE80211_QUEUE_STOP_REASON_RESERVE_TID,
++      IEEE80211_QUEUE_STOP_REASON_IFTYPE_CHANGE,
+ 
+       IEEE80211_QUEUE_STOP_REASONS,
+ };
+diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c
+index 519def0e15f17..6d12a893eb11c 100644
+--- a/net/mac80211/iface.c
++++ b/net/mac80211/iface.c
+@@ -1507,6 +1507,10 @@ static int ieee80211_runtime_change_iftype(struct ieee80211_sub_if_data *sdata,
+       if (ret)
+               return ret;
+ 
++      ieee80211_stop_vif_queues(local, sdata,
++                                IEEE80211_QUEUE_STOP_REASON_IFTYPE_CHANGE);
++      synchronize_net();
++
+       ieee80211_do_stop(sdata, false);
+ 
+       ieee80211_teardown_sdata(sdata);
+@@ -1527,6 +1531,8 @@ static int ieee80211_runtime_change_iftype(struct ieee80211_sub_if_data *sdata,
+       err = ieee80211_do_open(&sdata->wdev, false);
+       WARN(err, "type change: do_open returned %d", err);
+ 
++      ieee80211_wake_vif_queues(local, sdata,
++                                IEEE80211_QUEUE_STOP_REASON_IFTYPE_CHANGE);
+       return ret;
+ }
+ 
+diff --git a/net/netfilter/nft_dynset.c b/net/netfilter/nft_dynset.c
+index a6c29c5bbfbd4..d6fcfc9954202 100644
+--- a/net/netfilter/nft_dynset.c
++++ b/net/netfilter/nft_dynset.c
+@@ -189,8 +189,10 @@ static int nft_dynset_init(const struct nft_ctx *ctx,
+               nft_set_ext_add_length(&priv->tmpl, NFT_SET_EXT_EXPR,
+                                      priv->expr->ops->size);
+       if (set->flags & NFT_SET_TIMEOUT) {
+-              if (timeout || set->timeout)
++              if (timeout || set->timeout) {
++                      nft_set_ext_add(&priv->tmpl, NFT_SET_EXT_TIMEOUT);
+                       nft_set_ext_add(&priv->tmpl, NFT_SET_EXT_EXPIRATION);
++              }
+       }
+ 
+       priv->timeout = timeout;
+diff --git a/net/nfc/netlink.c b/net/nfc/netlink.c
+index 639e5cad0442c..c90dc2fd13a7c 100644
+--- a/net/nfc/netlink.c
++++ b/net/nfc/netlink.c
+@@ -850,6 +850,7 @@ static int nfc_genl_stop_poll(struct sk_buff *skb, struct genl_info *info)
+ 
+       if (!dev->polling) {
+               device_unlock(&dev->dev);
++              nfc_put_device(dev);
+               return -EINVAL;
+       }
+ 
+diff --git a/net/nfc/rawsock.c b/net/nfc/rawsock.c
+index 574af981806fa..92a3cfae4de87 100644
+--- a/net/nfc/rawsock.c
++++ b/net/nfc/rawsock.c
+@@ -117,7 +117,7 @@ static int rawsock_connect(struct socket *sock, struct sockaddr *_addr,
+       if (addr->target_idx > dev->target_next_idx - 1 ||
+           addr->target_idx < dev->target_next_idx - dev->n_targets) {
+               rc = -EINVAL;
+-              goto error;
++              goto put_dev;
+       }
+ 
+       rc = nfc_activate_target(dev, addr->target_idx, addr->nfc_protocol);
+diff --git a/net/wireless/wext-core.c b/net/wireless/wext-core.c
+index 843d2cf1e6a6c..9a929010ea9d0 100644
+--- a/net/wireless/wext-core.c
++++ b/net/wireless/wext-core.c
+@@ -895,8 +895,9 @@ out:
+ int call_commit_handler(struct net_device *dev)
+ {
+ #ifdef CONFIG_WIRELESS_EXT
+-      if ((netif_running(dev)) &&
+-         (dev->wireless_handlers->standard[0] != NULL))
++      if (netif_running(dev) &&
++          dev->wireless_handlers &&
++          dev->wireless_handlers->standard[0])
+               /* Call the commit handler on the driver */
+               return dev->wireless_handlers->standard[0](dev, NULL,
+                                                          NULL, NULL);
+diff --git a/net/xfrm/xfrm_input.c b/net/xfrm/xfrm_input.c
+index 1e87639f2c270..d613bf77cc0f9 100644
+--- a/net/xfrm/xfrm_input.c
++++ b/net/xfrm/xfrm_input.c
+@@ -315,7 +315,7 @@ resume:
+               /* only the first xfrm gets the encap type */
+               encap_type = 0;
+ 
+-              if (async && x->repl->recheck(x, skb, seq)) {
++              if (x->repl->recheck(x, skb, seq)) {
+                       XFRM_INC_STATS(net, LINUX_MIB_XFRMINSTATESEQERROR);
+                       goto drop_unlock;
+               }
