Embed struct utrace in task_struct.

One of the issues debugging utrace problems is the involvement of RCU
for protecting struct utrace and the subtle races it introduces with
task_struct lifetimes. This patch will hopefully push utrace along
further on the path of upstream acceptance.

If it's deemed necessary to put back struct utrace under RCU, maybe that
can be done after utrace stabilizes without it.

Tested on x86 (uni/smp) and powerpc -- patch applies on the current
utrace/utrace-ptrace branch.

With this patch, I haven't seen any WARN_ON(task->last_siginfo != info)
on x86; the frequency of its occurrence on powerpc has reduced
considerably. On one make check xcheck run, there were only two such
backtraces while earlier, there were many tens of them:

------------[ cut here ]------------
Badness at kernel/ptrace.c:530
NIP: c00000000007e2fc LR: c0000000000c0004 CTR: c00000000007e15c
REGS: c00000005681f800 TRAP: 0700   Tainted: G        W   (2.6.29-rc1-ut)
MSR: 8000000000029032 <EE,ME,CE,IR,DR>  CR: 44002428  XER: 20000000
TASK = c000000056790000[23664] 'exe' THREAD: c00000005681c000 CPU: 1
NIP [c00000000007e2fc] .ptrace_report_signal+0x1a0/0x2d4
LR [c0000000000c0004] .utrace_get_signal+0x3b0/0x6cc
Call Trace:
[c00000005681fa80] [c000000000956790]
klist_remove_waiters+0xf7a8/0x2f8b8 (unreliable)
[c00000005681fb30] [c0000000000c0004] .utrace_get_signal+0x3b0/0x6cc
[c00000005681fc20] [c000000000084a14] .get_signal_to_deliver+0x14c/0x368
[c00000005681fce0] [c000000000014ed4] .do_signal+0x7c/0x338
[c00000005681fe30] [c000000000008a80] do_work+0x24/0x28
Instruction dump:
f81a0020 e87e8008 4857fa59 60000000 2fbd0000 419e0034 e81b12a0 2fa00000 
419e0028 7c00e278 3120ffff 7c090110 <0b000000> e93b0216 3b400000 fb5b12a0 
------------------

Thanks to Alexey Dobriyan for his initial work way back in 2007.

There are no new regressions in the ptrace-utrace tests on x86. However,
on powerpc, two tests consistently fail, with the patch (haven't yet
tested if they happen without it):

step-jump-cont: step-jump-cont.c:140: pokeuser: Assertion `l == 0' failed.
/bin/sh: line 4: 32479 Aborted                 ${dir}$tst
FAIL: step-jump-cont
errno 14 (Bad address)
syscall-reset: syscall-reset.c:95: main: Assertion `(*__errno_location ()) == 
38' failed.
unexpected child status 67f
FAIL: syscall-reset

Signed-off-by: Ananth N Mavinakayanahalli <ana...@in.ibm.com>
---
 include/linux/sched.h     |    4 
 include/linux/tracehook.h |   16 -
 include/linux/utrace.h    |   69 ++++++--
 kernel/ptrace.c           |   11 +
 kernel/utrace.c           |  385 ++++++++++++----------------------------------
 5 files changed, 166 insertions(+), 319 deletions(-)

Index: utrace-19jan/include/linux/sched.h
===================================================================
--- utrace-19jan.orig/include/linux/sched.h
+++ utrace-19jan/include/linux/sched.h
@@ -88,6 +88,7 @@ struct sched_param {
 #include <linux/kobject.h>
 #include <linux/latencytop.h>
 #include <linux/cred.h>
+#include <linux/utrace.h>
 
 #include <asm/processor.h>
 
@@ -1267,8 +1268,7 @@ struct task_struct {
        seccomp_t seccomp;
 
 #ifdef CONFIG_UTRACE
-       struct utrace *utrace;
-       unsigned long utrace_flags;
+       struct utrace utrace;
 #endif
 
 /* Thread group tracking */
Index: utrace-19jan/include/linux/utrace.h
===================================================================
--- utrace-19jan.orig/include/linux/utrace.h
+++ utrace-19jan/include/linux/utrace.h
@@ -33,17 +33,62 @@
 #include <linux/list.h>
 #include <linux/kref.h>
 #include <linux/signal.h>
-#include <linux/sched.h>
+#include <linux/pid.h>
 
 struct linux_binprm;
+struct linux_binfmt;
 struct pt_regs;
-struct utrace;
+struct task_struct;
 struct user_regset;
 struct user_regset_view;
+struct seq_file;
+
+#define UTRACE_DEBUG 1
+/*
+ * Per-thread structure task_struct.utrace refers to.
+ *
+ * The two lists @attached and @attaching work together for smooth
+ * asynchronous attaching with low overhead.  Modifying either list
+ * requires @lock.  The @attaching list can be modified any time while
+ * holding @lock.  New engines being attached always go on this list.
+ *
+ * The @attached list is what the task itself uses for its reporting
+ * loops.  When the task itself is not quiescent, it can use the
+ * @attached list without taking any lock.  No one may modify the list
+ * when the task is not quiescent.  When it is quiescent, that means
+ * that it won't run again without taking @lock itself before using
+ * the list.
+ *
+ * At each place where we know the task is quiescent (or it's current),
+ * while holding @lock, we call splice_attaching(), below.  This moves
+ * the @attaching list members on to the end of the @attached list.
+ * Since this happens at the start of any reporting pass, any new
+ * engines attached asynchronously go on the stable @attached list
+ * in time to have their callbacks seen.
+ */
+struct utrace {
+       unsigned long flags;
+       struct task_struct *cloning;
+       struct list_head attached, attaching;
+       spinlock_t lock;
+#ifdef UTRACE_DEBUG
+       atomic_t check_dead;
+#endif
+
+       struct utrace_attached_engine *reporting;
+
+       unsigned int stopped:1;
+       unsigned int report:1;
+       unsigned int interrupt:1;
+       unsigned int signal_handler:1;
+       unsigned int vfork_stop:1; /* need utrace_stop() before vfork wait */
+       unsigned int death:1;   /* in utrace_report_death() now */
+       unsigned int reap:1;    /* release_task() has run */
+};
 
 /*
  * Event bits passed to utrace_set_events().
- * These appear in &struct task_stru...@utrace_flags
+ * These appear in &struct task_stru...@utrace.flags
  * and &struct utrace_attached_engi...@flags.
  */
 enum utrace_events {
@@ -144,22 +189,10 @@ static inline void task_utrace_proc_stat
 
 #else  /* CONFIG_UTRACE */
 
-static inline unsigned long task_utrace_flags(struct task_struct *task)
-{
-       return task->utrace_flags;
-}
-
-static inline struct utrace *task_utrace_struct(struct task_struct *task)
-{
-       return task->utrace;
-}
-
-static inline void utrace_init_task(struct task_struct *child)
-{
-       child->utrace_flags = 0;
-       child->utrace = NULL;
-}
+#define task_utrace_flags(task)                ((task)->utrace.flags)
+#define task_utrace_struct(task)       (&(task)->utrace)
 
+void utrace_init_task(struct task_struct *task);
 void task_utrace_proc_status(struct seq_file *m, struct task_struct *p);
 
 /**
Index: utrace-19jan/kernel/utrace.c
===================================================================
--- utrace-19jan.orig/kernel/utrace.c
+++ utrace-19jan/kernel/utrace.c
@@ -10,21 +10,20 @@
  * Red Hat Author: Roland McGrath.
  */
 
-#include <linux/utrace.h>
+#include <linux/sched.h>
 #include <linux/tracehook.h>
 #include <linux/regset.h>
 #include <asm/syscall.h>
 #include <linux/ptrace.h>
 #include <linux/err.h>
-#include <linux/sched.h>
 #include <linux/freezer.h>
 #include <linux/module.h>
 #include <linux/init.h>
 #include <linux/slab.h>
 #include <linux/seq_file.h>
+#include <linux/utrace.h>
 
 
-#define UTRACE_DEBUG 1
 #ifdef UTRACE_DEBUG
 #define CHECK_INIT(p)  atomic_set(&(p)->check_dead, 1)
 #define CHECK_DEAD(p)  BUG_ON(!atomic_dec_and_test(&(p)->check_dead))
@@ -33,91 +32,25 @@
 #define CHECK_DEAD(p)  do { } while (0)
 #endif
 
-/*
- * Per-thread structure task_struct.utrace points to.
- *
- * The task itself never has to worry about this going away after
- * some event is found set in task_struct.utrace_flags.
- * Once created, this pointer is changed only when the task is quiescent
- * (TASK_TRACED or TASK_STOPPED with the siglock held, or dead).
- *
- * For other parties, the pointer to this is protected by RCU and
- * task_lock.  Since call_rcu is never used while the thread is alive and
- * using this struct utrace, we can overlay the RCU data structure used
- * only for a dead struct with some local state used only for a live utrace
- * on an active thread.
- *
- * The two lists @attached and @attaching work together for smooth
- * asynchronous attaching with low overhead.  Modifying either list
- * requires @lock.  The @attaching list can be modified any time while
- * holding @lock.  New engines being attached always go on this list.
- *
- * The @attached list is what the task itself uses for its reporting
- * loops.  When the task itself is not quiescent, it can use the
- * @attached list without taking any lock.  Noone may modify the list
- * when the task is not quiescent.  When it is quiescent, that means
- * that it won't run again without taking @lock itself before using
- * the list.
- *
- * At each place where we know the task is quiescent (or it's current),
- * while holding @lock, we call splice_attaching(), below.  This moves
- * the @attaching list members on to the end of the @attached list.
- * Since this happens at the start of any reporting pass, any new
- * engines attached asynchronously go on the stable @attached list
- * in time to have their callbacks seen.
- */
-struct utrace {
-       union {
-               struct rcu_head dead;
-               struct {
-                       struct task_struct *cloning;
-               } live;
-       } u;
-
-       struct list_head attached, attaching;
-       spinlock_t lock;
-#ifdef UTRACE_DEBUG
-       atomic_t check_dead;
-#endif
-
-       struct utrace_attached_engine *reporting;
-
-       unsigned int stopped:1;
-       unsigned int report:1;
-       unsigned int interrupt:1;
-       unsigned int signal_handler:1;
-       unsigned int vfork_stop:1; /* need utrace_stop() before vfork wait */
-       unsigned int death:1;   /* in utrace_report_death() now */
-       unsigned int reap:1;    /* release_task() has run */
-};
-
-static struct kmem_cache *utrace_cachep;
 static struct kmem_cache *utrace_engine_cachep;
 static const struct utrace_engine_ops utrace_detached_ops; /* forward decl */
 
 static int __init utrace_init(void)
 {
-       utrace_cachep = KMEM_CACHE(utrace, SLAB_PANIC);
        utrace_engine_cachep = KMEM_CACHE(utrace_attached_engine, SLAB_PANIC);
        return 0;
 }
 module_init(utrace_init);
 
-static void utrace_free(struct rcu_head *rhead)
+void utrace_init_task(struct task_struct *task)
 {
-       struct utrace *utrace = container_of(rhead, struct utrace, u.dead);
-       kmem_cache_free(utrace_cachep, utrace);
-}
+       struct utrace *utrace = task_utrace_struct(task);
 
-/*
- * Called with utrace locked.  Clean it up and free it via RCU.
- */
-static void rcu_utrace_free(struct utrace *utrace)
-       __releases(utrace->lock)
-{
-       CHECK_DEAD(utrace);
-       spin_unlock(&utrace->lock);
-       call_rcu(&utrace->u.dead, utrace_free);
+       utrace->flags = 0;
+       utrace->cloning = NULL;
+       INIT_LIST_HEAD(&utrace->attached);
+       INIT_LIST_HEAD(&utrace->attaching);
+       spin_lock_init(&utrace->lock);
 }
 
 /*
@@ -202,8 +135,8 @@ static int utrace_first_engine(struct ta
         * report_clone hook has had a chance to run.
         */
        if (target->flags & PF_STARTING) {
-               utrace = current->utrace;
-               if (!utrace || utrace->u.live.cloning != target) {
+               utrace = task_utrace_struct(current);
+               if (utrace->cloning != target) {
                        yield();
                        if (signal_pending(current))
                                return -ERESTARTNOINTR;
@@ -211,14 +144,8 @@ static int utrace_first_engine(struct ta
                }
        }
 
-       utrace = kmem_cache_zalloc(utrace_cachep, GFP_KERNEL);
-       if (unlikely(!utrace))
-               return -ENOMEM;
-
-       INIT_LIST_HEAD(&utrace->attached);
-       INIT_LIST_HEAD(&utrace->attaching);
+       utrace = task_utrace_struct(target);
        list_add(&engine->entry, &utrace->attached);
-       spin_lock_init(&utrace->lock);
        CHECK_INIT(utrace);
 
        ret = -EAGAIN;
@@ -226,9 +153,7 @@ static int utrace_first_engine(struct ta
        task_lock(target);
        if (exclude_utrace(target)) {
                ret = -EBUSY;
-       } else if (likely(!target->utrace)) {
-               rcu_assign_pointer(target->utrace, utrace);
-
+       } else {
                /*
                 * The task_lock protects us against another thread doing
                 * the same thing.  We might still be racing against
@@ -246,30 +171,20 @@ static int utrace_first_engine(struct ta
                        spin_unlock(&utrace->lock);
                        return 0;
                }
-
-               /*
-                * The target has already been through release_task.
-                * Our caller will restart and notice it's too late now.
-                */
-               target->utrace = NULL;
        }
 
        /*
-        * Another engine attached first, so there is a struct already.
-        * A null return says to restart looking for the existing one.
+        * Another engine attached first.
+        * Restart looking for the existing one.
         */
        task_unlock(target);
        spin_unlock(&utrace->lock);
-       kmem_cache_free(utrace_cachep, utrace);
 
        return ret;
 }
 
 /*
- * Called with rcu_read_lock() held.
- * Lock utrace and verify that it's still installed in target->utrace.
- * If not, return -EAGAIN.
- * Then enqueue engine, or maybe don't if UTRACE_ATTACH_EXCLUSIVE.
+ * Enqueue engine, or maybe don't if UTRACE_ATTACH_EXCLUSIVE.
  */
 static int utrace_second_engine(struct task_struct *target,
                                struct utrace *utrace,
@@ -282,13 +197,7 @@ static int utrace_second_engine(struct t
 
        spin_lock(&utrace->lock);
 
-       if (unlikely(rcu_dereference(target->utrace) != utrace)) {
-               /*
-                * We lost a race with other CPUs doing a sequence
-                * of detach and attach before we got in.
-                */
-               ret = -EAGAIN;
-       } else if ((flags & UTRACE_ATTACH_EXCLUSIVE) &&
+       if ((flags & UTRACE_ATTACH_EXCLUSIVE) &&
                   unlikely(matching_engine(utrace, flags, ops, data))) {
                ret = -EEXIST;
        } else {
@@ -350,18 +259,15 @@ struct utrace_attached_engine *utrace_at
 {
        struct utrace *utrace;
        struct utrace_attached_engine *engine;
-       int ret;
+       int ret = 0;
 
 restart:
-       rcu_read_lock();
-       utrace = rcu_dereference(target->utrace);
-       smp_rmb();
+       utrace = task_utrace_struct(target);
        if (unlikely(target->exit_state == EXIT_DEAD)) {
                /*
                 * The target has already been reaped.
                 * Check this first; a race with reaping may lead to restart.
                 */
-               rcu_read_unlock();
                if (!(flags & UTRACE_ATTACH_CREATE))
                        return ERR_PTR(-ENOENT);
                return ERR_PTR(-ESRCH);
@@ -369,19 +275,14 @@ restart:
 
        if (!(flags & UTRACE_ATTACH_CREATE)) {
                engine = NULL;
-               if (utrace) {
-                       spin_lock(&utrace->lock);
-                       engine = matching_engine(utrace, flags, ops, data);
-                       if (engine)
-                               utrace_engine_get(engine);
-                       spin_unlock(&utrace->lock);
-               }
-               rcu_read_unlock();
+               spin_lock(&utrace->lock);
+               engine = matching_engine(utrace, flags, ops, data);
+               if (engine)
+                       utrace_engine_get(engine);
+               spin_unlock(&utrace->lock);
                return engine ?: ERR_PTR(-ENOENT);
        }
 
-       rcu_read_unlock();
-
        if (unlikely(!ops) || unlikely(ops == &utrace_detached_ops))
                return ERR_PTR(-EINVAL);
 
@@ -404,15 +305,12 @@ restart:
        engine->ops = ops;
        engine->data = data;
 
-       rcu_read_lock();
-       utrace = rcu_dereference(target->utrace);
-       if (!utrace) {
-               rcu_read_unlock();
+       if ((ret == 0) && (list_empty(&utrace->attached))) {
+               /* First time here, set engines up */
                ret = utrace_first_engine(target, engine);
        } else {
                ret = utrace_second_engine(target, utrace, engine,
                                           flags, ops, data);
-               rcu_read_unlock();
        }
 
        if (unlikely(ret)) {
@@ -561,28 +459,23 @@ static bool utrace_stop(struct task_stru
        try_to_freeze();
 
        killed = false;
-       rcu_read_lock();
-       utrace = rcu_dereference(task->utrace);
-       if (utrace) {
+       /*
+        * utrace_wakeup() clears @utrace->stopped before waking us up.
+        * We're officially awake if it's clear.
+        */
+       spin_lock(&utrace->lock);
+       if (unlikely(utrace->stopped)) {
                /*
-                * utrace_wakeup() clears @utrace->stopped before waking us up.
-                * We're officially awake if it's clear.
+                * If we're here with it still set, it must have been
+                * signal_wake_up() instead, waking us up for a SIGKILL.
                 */
-               spin_lock(&utrace->lock);
-               if (unlikely(utrace->stopped)) {
-                       /*
-                        * If we're here with it still set, it must have been
-                        * signal_wake_up() instead, waking us up for a SIGKILL.
-                        */
-                       spin_lock_irq(&task->sighand->siglock);
-                       WARN_ON(!sigismember(&task->pending.signal, SIGKILL));
-                       spin_unlock_irq(&task->sighand->siglock);
-                       utrace->stopped = 0;
-                       killed = true;
-               }
-               spin_unlock(&utrace->lock);
+               spin_lock_irq(&task->sighand->siglock);
+               WARN_ON(!sigismember(&task->pending.signal, SIGKILL));
+               spin_unlock_irq(&task->sighand->siglock);
+               utrace->stopped = 0;
+               killed = true;
        }
-       rcu_read_unlock();
+       spin_unlock(&utrace->lock);
 
        /*
         * While we were in TASK_TRACED, complete_signal() considered
@@ -619,6 +512,7 @@ static struct utrace *get_utrace_lock(st
        __acquires(utrace->lock)
 {
        struct utrace *utrace;
+       int ret = 0;
 
        /*
         * You must hold a ref to be making a call.  A call from within
@@ -650,7 +544,7 @@ static struct utrace *get_utrace_lock(st
                return attached ? ERR_PTR(-ESRCH) : ERR_PTR(-ERESTARTSYS);
        }
 
-       utrace = rcu_dereference(target->utrace);
+       utrace = task_utrace_struct(target);
        smp_rmb();
        if (unlikely(!utrace) || unlikely(target->exit_state == EXIT_DEAD)) {
                /*
@@ -659,24 +553,26 @@ static struct utrace *get_utrace_lock(st
                 * have started.  A call to this engine's report_reap
                 * callback might already be in progress.
                 */
-               utrace = ERR_PTR(-ESRCH);
+               ret = -ESRCH;
        } else {
                spin_lock(&utrace->lock);
-               if (unlikely(rcu_dereference(target->utrace) != utrace) ||
-                   unlikely(!engine->ops) ||
+               if (unlikely(!engine->ops) ||
                    unlikely(engine->ops == &utrace_detached_ops)) {
                        /*
                         * By the time we got the utrace lock,
                         * it had been reaped or detached already.
                         */
                        spin_unlock(&utrace->lock);
-                       utrace = ERR_PTR(-ESRCH);
+                       ret = -ESRCH;
                        if (!attached && engine->ops == &utrace_detached_ops)
-                               utrace = ERR_PTR(-ERESTARTSYS);
+                               ret = -ERESTARTSYS;
                }
        }
        rcu_read_unlock();
 
+       if (ret)
+               return ERR_PTR(ret);
+
        return utrace;
 }
 
@@ -732,8 +628,8 @@ restart:
                goto restart;
        }
 
-       rcu_utrace_free(utrace); /* Releases the lock.  */
-
+       CHECK_DEAD(utrace);
+       spin_unlock(&utrace->lock);
        put_detached_list(&detached);
 }
 
@@ -744,15 +640,7 @@ restart:
  */
 void utrace_release_task(struct task_struct *target)
 {
-       struct utrace *utrace;
-
-       task_lock(target);
-       utrace = rcu_dereference(target->utrace);
-       rcu_assign_pointer(target->utrace, NULL);
-       task_unlock(target);
-
-       if (unlikely(!utrace))
-               return;
+       struct utrace *utrace = task_utrace_struct(target);
 
        spin_lock(&utrace->lock);
        /*
@@ -763,7 +651,7 @@ void utrace_release_task(struct task_str
        if (likely(!list_empty(&utrace->attached))) {
                utrace->reap = 1;
 
-               if (!(target->utrace_flags & DEATH_EVENTS)) {
+               if (!(utrace->flags & DEATH_EVENTS)) {
                        utrace_reap(target, utrace); /* Unlocks and frees.  */
                        return;
                }
@@ -853,7 +741,7 @@ int utrace_set_events(struct task_struct
        if (unlikely(IS_ERR(utrace)))
                return PTR_ERR(utrace);
 
-       old_utrace_flags = target->utrace_flags;
+       old_utrace_flags = utrace->flags;
        set_utrace_flags = events;
        old_flags = engine->flags;
 
@@ -899,12 +787,12 @@ int utrace_set_events(struct task_struct
                        spin_unlock(&utrace->lock);
                        return -EALREADY;
                }
-               target->utrace_flags |= set_utrace_flags;
+               utrace->flags |= set_utrace_flags;
                read_unlock(&tasklist_lock);
        }
 
        engine->flags = events | (engine->flags & ENGINE_STOP);
-       target->utrace_flags |= set_utrace_flags;
+       utrace->flags |= set_utrace_flags;
 
        if ((set_utrace_flags & UTRACE_EVENT_SYSCALL) &&
            !(old_utrace_flags & UTRACE_EVENT_SYSCALL))
@@ -961,7 +849,7 @@ static bool utrace_do_stop(struct task_s
         * through utrace_get_signal() before doing anything else.
         */
        if (task_is_stopped(target) &&
-           !(target->utrace_flags & UTRACE_EVENT(JCTL))) {
+           !(utrace->flags & UTRACE_EVENT(JCTL))) {
                utrace->stopped = 1;
                return true;
        }
@@ -974,10 +862,10 @@ static bool utrace_do_stop(struct task_s
                 * if it has already been through
                 * utrace_report_death(), or never will.
                 */
-               if (!(target->utrace_flags & DEATH_EVENTS))
+               if (!(utrace->flags & DEATH_EVENTS))
                        utrace->stopped = stopped = true;
        } else if (task_is_stopped(target)) {
-               if (!(target->utrace_flags & UTRACE_EVENT(JCTL)))
+               if (!(utrace->flags & UTRACE_EVENT(JCTL)))
                        utrace->stopped = stopped = true;
        } else if (!utrace->report && !utrace->interrupt) {
                utrace->report = 1;
@@ -1017,7 +905,7 @@ static void utrace_wakeup(struct task_st
 
 /*
  * This is called when there might be some detached engines on the list or
- * some stale bits in @task->utrace_flags.  Clean them up and recompute the
+ * some stale bits in @task->utrace.flags.  Clean them up and recompute the
  * flags.
  *
  * @action is NULL when @task is stopped and @utrace->stopped is set; wake
@@ -1064,7 +952,7 @@ static void utrace_reset(struct task_str
                clear_tsk_thread_flag(task, TIF_SYSCALL_TRACE);
        }
 
-       task->utrace_flags = flags;
+       utrace->flags = flags;
 
        if (wake)
                utrace_wakeup(task, utrace);
@@ -1075,21 +963,8 @@ static void utrace_reset(struct task_str
        if (flags) {
                spin_unlock(&utrace->lock);
        } else {
-               /*
-                * No more engines, clear out the utrace.  Here we can race
-                * with utrace_release_task().  If it gets task_lock()
-                * first, then it cleans up this struct for us.
-                */
-
-               task_lock(task);
-               if (unlikely(task->utrace != utrace)) {
-                       task_unlock(task);
-                       spin_unlock(&utrace->lock);
-               } else {
-                       rcu_assign_pointer(task->utrace, NULL);
-                       task_unlock(task);
-                       rcu_utrace_free(utrace);
-               }
+               CHECK_DEAD(utrace);
+               spin_unlock(&utrace->lock);
 
                if (action)
                        *action = UTRACE_RESUME;
@@ -1241,7 +1116,7 @@ int utrace_control(struct task_struct *t
                    unlikely(utrace->reap)) {
                        spin_unlock(&utrace->lock);
                        return -ESRCH;
-               } else if (unlikely(target->utrace_flags & DEATH_EVENTS) ||
+               } else if (unlikely(utrace->flags & DEATH_EVENTS) ||
                           unlikely(utrace->death)) {
                        /*
                         * We have already started the death report, or
@@ -1464,7 +1339,7 @@ static void start_report(struct utrace *
  * returns from engine callbacks.  If any engine's last callback used
  * UTRACE_STOP, we do UTRACE_REPORT here to ensure we stop before user
  * mode.  If there were no callbacks made, it will recompute
- * @task->utrace_flags to avoid another false-positive.
+ * @task->utrace.flags to avoid another false-positive.
  */
 static void finish_report(struct utrace_report *report,
                          struct task_struct *task, struct utrace *utrace)
@@ -1627,7 +1502,7 @@ void utrace_report_exec(struct linux_bin
                        struct pt_regs *regs)
 {
        struct task_struct *task = current;
-       struct utrace *utrace = task->utrace;
+       struct utrace *utrace = task_utrace_struct(task);
        INIT_REPORT(report);
 
        REPORT(task, utrace, &report, UTRACE_EVENT(EXEC),
@@ -1641,7 +1516,7 @@ void utrace_report_exec(struct linux_bin
 bool utrace_report_syscall_entry(struct pt_regs *regs)
 {
        struct task_struct *task = current;
-       struct utrace *utrace = task->utrace;
+       struct utrace *utrace = task_utrace_struct(task);
        INIT_REPORT(report);
 
        start_report(utrace);
@@ -1684,7 +1559,7 @@ bool utrace_report_syscall_entry(struct 
 void utrace_report_syscall_exit(struct pt_regs *regs)
 {
        struct task_struct *task = current;
-       struct utrace *utrace = task->utrace;
+       struct utrace *utrace = task_utrace_struct(task);
        INIT_REPORT(report);
 
        REPORT(task, utrace, &report, UTRACE_EVENT(SYSCALL_EXIT),
@@ -1700,23 +1575,23 @@ void utrace_report_syscall_exit(struct p
 void utrace_report_clone(unsigned long clone_flags, struct task_struct *child)
 {
        struct task_struct *task = current;
-       struct utrace *utrace = task->utrace;
+       struct utrace *utrace = task_utrace_struct(task);
        INIT_REPORT(report);
 
        /*
         * We don't use the REPORT() macro here, because we need
-        * to clear utrace->u.live.cloning before finish_report().
+        * to clear utrace->cloning before finish_report().
         * After finish_report(), utrace can be a stale pointer
         * in cases when report.action is still UTRACE_RESUME.
         */
        start_report(utrace);
-       utrace->u.live.cloning = child;
+       utrace->cloning = child;
 
        REPORT_CALLBACKS(task, utrace, &report,
                         UTRACE_EVENT(CLONE), report_clone,
                         report.action, engine, task, clone_flags, child);
 
-       utrace->u.live.cloning = NULL;
+       utrace->cloning = NULL;
        finish_report(&report, task, utrace);
 
        /*
@@ -1739,7 +1614,7 @@ void utrace_report_clone(unsigned long c
  */
 void utrace_finish_vfork(struct task_struct *task)
 {
-       struct utrace *utrace = task->utrace;
+       struct utrace *utrace = task_utrace_struct(task);
 
        spin_lock(&utrace->lock);
        if (!utrace->vfork_stop)
@@ -1757,7 +1632,7 @@ void utrace_finish_vfork(struct task_str
 void utrace_report_jctl(int notify, int what)
 {
        struct task_struct *task = current;
-       struct utrace *utrace = task->utrace;
+       struct utrace *utrace = task_utrace_struct(task);
        INIT_REPORT(report);
        bool was_stopped = task_is_stopped(task);
 
@@ -1768,29 +1643,17 @@ void utrace_report_jctl(int notify, int 
         *
         * While in TASK_STOPPED, we can be considered safely
         * stopped by utrace_do_stop() and detached asynchronously.
-        * If we woke up and checked task->utrace_flags before that
+        * If we woke up and checked task->utrace.flags before that
         * was finished, we might be here with utrace already
         * removed or in the middle of being removed.
         *
-        * RCU makes it safe to get the utrace->lock even if it's
-        * being freed.  Once we have that lock, either an external
-        * detach has finished and this struct has been freed, or
-        * else we know we are excluding any other detach attempt.
-        *
         * If we are indeed attached, then make sure we are no
         * longer considered stopped while we run callbacks.
         */
-       rcu_read_lock();
-       utrace = rcu_dereference(task->utrace);
-       if (unlikely(!utrace)) {
-               rcu_read_unlock();
-               return;
-       }
        spin_lock(&utrace->lock);
        utrace->stopped = 0;
        utrace->report = 0;
        spin_unlock(&utrace->lock);
-       rcu_read_unlock();
 
        REPORT(task, utrace, &report, UTRACE_EVENT(JCTL),
               report_jctl, was_stopped ? CLD_STOPPED : CLD_CONTINUED, what);
@@ -1825,7 +1688,7 @@ void utrace_report_jctl(int notify, int 
 void utrace_report_exit(long *exit_code)
 {
        struct task_struct *task = current;
-       struct utrace *utrace = task->utrace;
+       struct utrace *utrace = task_utrace_struct(task);
        INIT_REPORT(report);
        long orig_code = *exit_code;
 
@@ -1935,7 +1798,7 @@ static void finish_resume_report(struct 
  */
 void utrace_resume(struct task_struct *task, struct pt_regs *regs)
 {
-       struct utrace *utrace = task->utrace;
+       struct utrace *utrace = task_utrace_struct(task);
        INIT_REPORT(report);
        struct utrace_attached_engine *engine, *next;
 
@@ -1987,13 +1850,13 @@ void utrace_resume(struct task_struct *t
 /*
  * Return true if current has forced signal_pending().
  *
- * This is called only when current->utrace_flags is nonzero, so we know
+ * This is called only when current->utrace.flags is nonzero, so we know
  * that current->utrace must be set.  It's not inlined in tracehook.h
  * just so that struct utrace can stay opaque outside this file.
  */
 bool utrace_interrupt_pending(void)
 {
-       return current->utrace->interrupt;
+       return current->utrace.interrupt;
 }
 
 /*
@@ -2034,7 +1897,7 @@ int utrace_get_signal(struct task_struct
        __releases(task->sighand->siglock)
        __acquires(task->sighand->siglock)
 {
-       struct utrace *utrace;
+       struct utrace *utrace = task_utrace_struct(task);
        struct k_sigaction *ka;
        INIT_REPORT(report);
        struct utrace_attached_engine *engine, *next;
@@ -2043,44 +1906,13 @@ int utrace_get_signal(struct task_struct
        u32 ret;
        int signr;
 
-       /*
-        * We could have been considered quiescent while we were in
-        * TASK_STOPPED, and detached asynchronously.  If we woke up
-        * and checked task->utrace_flags before that was finished,
-        * we might be here with utrace already removed or in the
-        * middle of being removed.
-        */
-       rcu_read_lock();
-       utrace = rcu_dereference(task->utrace);
-       if (unlikely(!utrace)) {
-               rcu_read_unlock();
-               return 0;
-       }
-
        if (utrace->interrupt || utrace->report || utrace->signal_handler) {
                /*
                 * We've been asked for an explicit report before we
                 * even check for pending signals.
                 */
-
                spin_unlock_irq(&task->sighand->siglock);
-
-               /*
-                * RCU makes it safe to get the utrace->lock even if
-                * it's being freed.  Once we have that lock, either an
-                * external detach has finished and this struct has been
-                * freed, or else we know we are excluding any other
-                * detach attempt.
-                */
                spin_lock(&utrace->lock);
-               rcu_read_unlock();
-
-               if (unlikely(task->utrace != utrace)) {
-                       spin_unlock(&utrace->lock);
-                       cond_resched();
-                       return -1;
-               }
-
                splice_attaching(utrace);
 
                if (unlikely(!utrace->interrupt) && unlikely(!utrace->report))
@@ -2123,12 +1955,11 @@ int utrace_get_signal(struct task_struct
                event = 0;
                ka = NULL;
                memset(return_ka, 0, sizeof *return_ka);
-       } else if ((task->utrace_flags & UTRACE_EVENT_SIGNAL_ALL) == 0) {
+       } else if ((utrace->flags & UTRACE_EVENT_SIGNAL_ALL) == 0) {
                /*
                 * If noone is interested in intercepting signals,
                 * let the caller just dequeue them normally.
                 */
-               rcu_read_unlock();
                return 0;
        } else {
                if (unlikely(utrace->stopped)) {
@@ -2147,17 +1978,9 @@ int utrace_get_signal(struct task_struct
                         */
                        spin_unlock_irq(&task->sighand->siglock);
                        spin_lock(&utrace->lock);
-                       rcu_read_unlock();
-                       if (unlikely(task->utrace != utrace)) {
-                               spin_unlock(&utrace->lock);
-                               cond_resched();
-                               return -1;
-                       }
                        utrace->stopped = 0;
                        spin_unlock(&utrace->lock);
                        spin_lock_irq(&task->sighand->siglock);
-               } else {
-                       rcu_read_unlock();
                }
 
                /*
@@ -2209,7 +2032,7 @@ int utrace_get_signal(struct task_struct
                 * Now that we know what event type this signal is,
                 * we can short-circuit if noone cares about those.
                 */
-               if ((task->utrace_flags & (event | UTRACE_EVENT(QUIESCE))) == 0)
+               if ((utrace->flags & (event | UTRACE_EVENT(QUIESCE))) == 0)
                        return signr;
 
                /*
@@ -2398,7 +2221,7 @@ int utrace_get_signal(struct task_struct
  */
 void utrace_signal_handler(struct task_struct *task, int stepping)
 {
-       struct utrace *utrace = task->utrace;
+       struct utrace *utrace = task_utrace_struct(task);
 
        spin_lock(&utrace->lock);
 
@@ -2544,23 +2367,19 @@ EXPORT_SYMBOL_GPL(task_user_regset_view)
  */
 struct task_struct *utrace_tracer_task(struct task_struct *target)
 {
-       struct utrace *utrace;
+       struct utrace *utrace = task_utrace_struct(target);
        struct task_struct *tracer = NULL;
+       struct list_head *pos, *next;
+       struct utrace_attached_engine *engine;
+       const struct utrace_engine_ops *ops;
 
-       utrace = rcu_dereference(target->utrace);
-       if (utrace != NULL) {
-               struct list_head *pos, *next;
-               struct utrace_attached_engine *engine;
-               const struct utrace_engine_ops *ops;
-               list_for_each_safe(pos, next, &utrace->attached) {
-                       engine = list_entry(pos, struct utrace_attached_engine,
-                                           entry);
-                       ops = rcu_dereference(engine->ops);
-                       if (ops->tracer_task) {
-                               tracer = (*ops->tracer_task)(engine, target);
-                               if (tracer != NULL)
-                                       break;
-                       }
+       list_for_each_safe(pos, next, &utrace->attached) {
+               engine = list_entry(pos, struct utrace_attached_engine, entry);
+               ops = rcu_dereference(engine->ops);
+               if (ops->tracer_task) {
+                       tracer = (*ops->tracer_task)(engine, target);
+                       if (tracer != NULL)
+                               break;
                }
        }
 
@@ -2573,7 +2392,7 @@ struct task_struct *utrace_tracer_task(s
  */
 int utrace_unsafe_exec(struct task_struct *task)
 {
-       struct utrace *utrace = task->utrace;
+       struct utrace *utrace = task_utrace_struct(task);
        struct utrace_attached_engine *engine, *next;
        const struct utrace_engine_ops *ops;
        int unsafe = 0;
@@ -2592,11 +2411,11 @@ int utrace_unsafe_exec(struct task_struc
  */
 void task_utrace_proc_status(struct seq_file *m, struct task_struct *p)
 {
-       struct utrace *utrace = rcu_dereference(p->utrace);
-       if (unlikely(utrace))
-               seq_printf(m, "Utrace: %lx%s%s%s\n",
-                          p->utrace_flags,
-                          utrace->stopped ? " (stopped)" : "",
-                          utrace->report ? " (report)" : "",
-                          utrace->interrupt ? " (interrupt)" : "");
+       struct utrace *utrace = task_utrace_struct(p);
+
+       seq_printf(m, "Utrace: %lx%s%s%s\n",
+                       utrace->flags,
+                       utrace->stopped ? " (stopped)" : "",
+                       utrace->report ? " (report)" : "",
+                       utrace->interrupt ? " (interrupt)" : "");
 }
Index: utrace-19jan/include/linux/tracehook.h
===================================================================
--- utrace-19jan.orig/include/linux/tracehook.h
+++ utrace-19jan/include/linux/tracehook.h
@@ -370,8 +370,7 @@ static inline void tracehook_report_vfor
 static inline void tracehook_prepare_release_task(struct task_struct *task)
 {
        smp_mb();
-       if (task_utrace_struct(task) != NULL)
-               utrace_release_task(task);
+       utrace_release_task(task);
 }
 
 /**
@@ -385,21 +384,8 @@ static inline void tracehook_prepare_rel
  */
 static inline void tracehook_finish_release_task(struct task_struct *task)
 {
-       int bad = 0;
        ptrace_release_task(task);
        BUG_ON(task->exit_state != EXIT_DEAD);
-       if (unlikely(task_utrace_struct(task) != NULL)) {
-               /*
-                * In a race condition, utrace_attach() will temporarily set
-                * it, but then check @task->exit_state and clear it.  It does
-                * all this under task_lock(), so we take the lock to check
-                * that there is really a bug and not just that known race.
-                */
-               task_lock(task);
-               bad = unlikely(task_utrace_struct(task) != NULL);
-               task_unlock(task);
-       }
-       BUG_ON(bad);
 }
 
 /**
Index: utrace-19jan/kernel/ptrace.c
===================================================================
--- utrace-19jan.orig/kernel/ptrace.c
+++ utrace-19jan/kernel/ptrace.c
@@ -778,7 +778,16 @@ static inline bool exclude_ptrace(struct
  */
 static inline bool exclude_ptrace(struct task_struct *task)
 {
-       return unlikely(!!task_utrace_struct(task));
+       struct utrace *utrace = task_utrace_struct(task);
+
+       spin_lock(&utrace->lock);
+       if (list_empty(&utrace->attached) && list_empty(&utrace->attaching)) {
+               spin_unlock(&utrace->lock);
+               return false;
+       }
+
+       spin_unlock(&utrace->lock);
+       return true;
 }
 #endif
 

Reply via email to