Module: xenomai-forge
Branch: next
Commit: 5743b8d28808e8e5993d32c209a735aabcda5182
URL:    http://git.xenomai.org/?p=xenomai-forge.git;a=commit;h=5743b8d28808e8e5993d32c209a735aabcda5182

Author: Philippe Gerum <r...@xenomai.org>
Date:   Wed May  8 18:46:09 2013 +0200

copperplate/threadobj: scheduler lock, unlock calls implicitly apply to current

---

 include/copperplate/threadobj.h |   12 +++--
 kernel/cobalt/nucleus/shadow.c  |   28 ++++++++++----
 lib/copperplate/threadobj.c     |   79 ++++++++++++++++++---------------------
 lib/psos/task.c                 |    6 +-
 lib/vxworks/taskLib.c           |    4 +-
 5 files changed, 68 insertions(+), 61 deletions(-)

diff --git a/include/copperplate/threadobj.h b/include/copperplate/threadobj.h
index 7d8cb9f..ac8d846 100644
--- a/include/copperplate/threadobj.h
+++ b/include/copperplate/threadobj.h
@@ -295,9 +295,9 @@ int threadobj_resume(struct threadobj *thobj);
 
 int threadobj_unblock(struct threadobj *thobj);
 
-int threadobj_lock_sched(struct threadobj *thobj);
+int threadobj_lock_sched(void);
 
-int threadobj_unlock_sched(struct threadobj *thobj);
+int threadobj_unlock_sched(void);
 
 int threadobj_set_priority(struct threadobj *thobj, int prio);
 
@@ -397,10 +397,12 @@ static inline int threadobj_current_p(void)
        return current && current != THREADOBJ_IRQCONTEXT;
 }
 
-static inline int threadobj_lock_sched_once(struct threadobj *thobj)
+static inline int threadobj_lock_sched_once(void)
 {
-       if (thobj->schedlock_depth == 0)
-               return threadobj_lock_sched(thobj);
+       struct threadobj *current = threadobj_current();
+
+       if (current->schedlock_depth == 0)
+               return threadobj_lock_sched();
 
        return 0;
 }
diff --git a/kernel/cobalt/nucleus/shadow.c b/kernel/cobalt/nucleus/shadow.c
index 0a043c6..ea40c72 100644
--- a/kernel/cobalt/nucleus/shadow.c
+++ b/kernel/cobalt/nucleus/shadow.c
@@ -270,18 +270,30 @@ static void request_syscall_restart(struct xnthread *thread,
        xnshadow_relax(notify, SIGDEBUG_MIGRATE_SIGNAL);
 }
 
-static inline void lock_timers(void)
+static inline void __lock_timers(struct xnthread *thread, const char *fn)
 {
        xnarch_atomic_inc(&nkpod->timerlck);
-       setbits(nkclock.status, XNTBLCK);
+       setbits(nktbase.status, XNTBLCK);
+       XENO_BUGON(NUCLEUS, xnarch_atomic_get(&nkpod->timerlck) == 0);
+       printk(KERN_WARNING "LOCK %s, thread=%s[%d], count=%ld\n",
+              fn, thread->name, xnthread_user_pid(thread),
+              xnarch_atomic_get(&nkpod->timerlck));
 }
 
-static inline void unlock_timers(void)
+static inline void __unlock_timers(struct xnthread *thread, const char *fn)
 {
-       if (xnarch_atomic_dec_and_test(&nkpod->timerlck))
-               clrbits(nkclock.status, XNTBLCK);
+       if (xnarch_atomic_dec_and_test(&nkpod->timerlck)) {
+               clrbits(nktbase.status, XNTBLCK);
+               XENO_BUGON(NUCLEUS, xnarch_atomic_get(&nkpod->timerlck) != 0);
+       }
+       printk(KERN_WARNING "UNLOCK %s, thread=%s[%d], count=%ld\n",
+              fn, thread->name, xnthread_user_pid(thread),
+              xnarch_atomic_get(&nkpod->timerlck));
 }
 
+#define lock_timers(t)         __lock_timers((t), __func__)
+#define unlock_timers(t)       __unlock_timers((t), __func__)
+
 struct lostage_wakeup {
        struct ipipe_work_header work; /* Must be first. */
        struct task_struct *task;
@@ -2159,7 +2171,7 @@ static int handle_taskexit_event(struct task_struct *p) /* p == current */
                   thread, xnthread_name(thread));
 
        if (xnthread_test_state(thread, XNDEBUG))
-               unlock_timers();
+               unlock_timers(thread);
 
        /* __xnpod_cleanup_thread() -> hook -> xnshadow_unmap() */
        __xnpod_cleanup_thread(thread);
@@ -2217,7 +2229,7 @@ static int handle_schedule_event(struct task_struct *next_task)
                                goto no_ptrace;
                }
                xnthread_clear_state(next, XNDEBUG);
-               unlock_timers();
+               unlock_timers(next);
        }
 
 no_ptrace:
@@ -2272,7 +2284,7 @@ static int handle_sigwake_event(struct task_struct *p)
                    sigismember(&pending, SIGSTOP)
                    || sigismember(&pending, SIGINT)) {
                        xnthread_set_state(thread, XNDEBUG);
-                       lock_timers();
+                       lock_timers(thread);
                }
        }
 
diff --git a/lib/copperplate/threadobj.c b/lib/copperplate/threadobj.c
index fb05a13..93373f4 100644
--- a/lib/copperplate/threadobj.c
+++ b/lib/copperplate/threadobj.c
@@ -173,16 +173,16 @@ int threadobj_resume(struct threadobj *thobj) /* thobj->lock held */
        return __bt(-ret);
 }
 
-int threadobj_lock_sched(struct threadobj *thobj) /* thobj->lock held */
+int threadobj_lock_sched(void) /* current->lock held */
 {
-       __threadobj_check_locked(thobj);
+       struct threadobj *current = threadobj_current();
 
-       assert(thobj == threadobj_current());
+       __threadobj_check_locked(current);
 
-       if (thobj->schedlock_depth++ > 0)
+       if (current->schedlock_depth++ > 0)
                return 0;
 
-       thobj->status |= __THREAD_S_NOPREEMPT;
+       current->status |= __THREAD_S_NOPREEMPT;
        /*
         * In essence, we can't be scheduled out as a result of
         * locking the scheduler, so no need to drop the thread lock
@@ -191,13 +191,11 @@ int threadobj_lock_sched(struct threadobj *thobj) /* thobj->lock held */
        return __bt(-pthread_set_mode_np(0, PTHREAD_LOCK_SCHED, NULL));
 }
 
-int threadobj_unlock_sched(struct threadobj *thobj) /* thobj->lock held */
+int threadobj_unlock_sched(void) /* current->lock held */
 {
-       int ret;
-
-       __threadobj_check_locked(thobj);
+       struct threadobj *current = threadobj_current();
 
-       assert(thobj == threadobj_current());
+       __threadobj_check_locked(current);
 
        /*
         * Higher layers may not know about the current locking level
@@ -205,18 +203,15 @@ int threadobj_unlock_sched(struct threadobj *thobj) /* thobj->lock held */
         * unbalanced calls here, and let them decide of the outcome
         * in case of error.
         */
-       if (thobj->schedlock_depth == 0)
+       if (current->schedlock_depth == 0)
                return __bt(-EINVAL);
 
-       if (--thobj->schedlock_depth > 0)
+       if (--current->schedlock_depth > 0)
                return 0;
 
-       thobj->status &= ~__THREAD_S_NOPREEMPT;
-       threadobj_unlock(thobj);
-       ret = pthread_set_mode_np(PTHREAD_LOCK_SCHED, 0, NULL);
-       threadobj_lock(thobj);
+       current->status &= ~__THREAD_S_NOPREEMPT;
 
-       return __bt(-ret);
+       return __bt(-pthread_set_mode_np(PTHREAD_LOCK_SCHED, 0, NULL));
 }
 
 int threadobj_set_priority(struct threadobj *thobj, int prio) /* thobj->lock held, dropped */
@@ -494,51 +489,49 @@ int threadobj_resume(struct threadobj *thobj) /* thobj->lock held */
        return __bt(notifier_release(&thobj->core.notifier));
 }
 
-int threadobj_lock_sched(struct threadobj *thobj) /* thobj->lock held */
+int threadobj_lock_sched(void) /* current->lock held */
 {
-       pthread_t tid = thobj->tid;
+       struct threadobj *current = threadobj_current();
+       pthread_t tid = current->tid;
        struct sched_param param;
 
-       __threadobj_check_locked(thobj);
-
-       assert(thobj == threadobj_current());
+       __threadobj_check_locked(current);
 
-       if (thobj->schedlock_depth++ > 0)
+       if (current->schedlock_depth++ > 0)
                return 0;
 
-       thobj->core.prio_unlocked = thobj->priority;
-       thobj->core.policy_unlocked = thobj->policy;
-       thobj->status |= __THREAD_S_NOPREEMPT;
-       thobj->priority = threadobj_lock_prio;
-       thobj->policy = SCHED_RT;
+       current->core.prio_unlocked = current->priority;
+       current->core.policy_unlocked = current->policy;
+       current->status |= __THREAD_S_NOPREEMPT;
+       current->priority = threadobj_lock_prio;
+       current->policy = SCHED_RT;
        param.sched_priority = threadobj_lock_prio;
 
        return __bt(-pthread_setschedparam(tid, SCHED_RT, &param));
 }
 
-int threadobj_unlock_sched(struct threadobj *thobj) /* thobj->lock held */
+int threadobj_unlock_sched(void) /* current->lock held */
 {
-       pthread_t tid = thobj->tid;
+       struct threadobj *current = threadobj_current();
+       pthread_t tid = current->tid;
        struct sched_param param;
        int policy, ret;
 
-       __threadobj_check_locked(thobj);
-
-       assert(thobj == threadobj_current());
+       __threadobj_check_locked(current);
 
-       if (thobj->schedlock_depth == 0)
+       if (current->schedlock_depth == 0)
                return __bt(-EINVAL);
 
-       if (--thobj->schedlock_depth > 0)
+       if (--current->schedlock_depth > 0)
                return 0;
 
-       thobj->status &= ~__THREAD_S_NOPREEMPT;
-       thobj->priority = thobj->core.prio_unlocked;
-       param.sched_priority = thobj->core.prio_unlocked;
-       policy = thobj->core.policy_unlocked;
-       threadobj_unlock(thobj);
+       current->status &= ~__THREAD_S_NOPREEMPT;
+       current->priority = current->core.prio_unlocked;
+       param.sched_priority = current->core.prio_unlocked;
+       policy = current->core.policy_unlocked;
+       threadobj_unlock(current);
        ret = pthread_setschedparam(tid, policy, &param);
-       threadobj_lock(thobj);
+       threadobj_lock(current);
 
        return __bt(-ret);
 }
@@ -592,9 +585,9 @@ int threadobj_set_mode(int clrmask, int setmask, int *mode_r) /* current->lock h
                old |= __THREAD_M_LOCK;
 
        if (setmask & __THREAD_M_LOCK)
-               ret = __bt(threadobj_lock_sched_once(current));
+               ret = __bt(threadobj_lock_sched_once());
        else if (clrmask & __THREAD_M_LOCK)
-               threadobj_unlock_sched(current);
+               threadobj_unlock_sched();
 
        if (mode_r)
                *mode_r = old;
diff --git a/lib/psos/task.c b/lib/psos/task.c
index c3718f8..a00cca2 100644
--- a/lib/psos/task.c
+++ b/lib/psos/task.c
@@ -193,7 +193,7 @@ static void *task_trampoline(void *arg)
                threadobj_set_rr(&task->thobj, &psos_rrperiod);
 
        if (task->mode & T_NOPREEMPT)
-               threadobj_lock_sched(&task->thobj);
+               threadobj_lock_sched();
 
        threadobj_unlock(&task->thobj);
 
@@ -546,9 +546,9 @@ u_long t_mode(u_long mask, u_long newmask, u_long *oldmode_r)
        task->mode |= (newmask & mask);
 
        if (task->mode & T_NOPREEMPT)
-               threadobj_lock_sched_once(&task->thobj);
+               threadobj_lock_sched_once();
        else if (*oldmode_r & T_NOPREEMPT)
-               threadobj_unlock_sched(&task->thobj);
+               threadobj_unlock_sched();
 
        /*
         * Copperplate won't accept to turn round-robin on/off when
diff --git a/lib/vxworks/taskLib.c b/lib/vxworks/taskLib.c
index c891dd5..a212ea2 100644
--- a/lib/vxworks/taskLib.c
+++ b/lib/vxworks/taskLib.c
@@ -782,7 +782,7 @@ STATUS taskLock(void)
        }
 
        COPPERPLATE_PROTECT(svc);
-       threadobj_lock_sched(&task->thobj);
+       threadobj_lock_sched();
        COPPERPLATE_UNPROTECT(svc);
        put_wind_task(task);
 
@@ -806,7 +806,7 @@ STATUS taskUnlock(void)
        }
 
        COPPERPLATE_PROTECT(svc);
-       threadobj_unlock_sched(&task->thobj);
+       threadobj_unlock_sched();
        COPPERPLATE_UNPROTECT(svc);
        put_wind_task(task);
 


_______________________________________________
Xenomai-git mailing list
Xenomai-git@xenomai.org
http://www.xenomai.org/mailman/listinfo/xenomai-git

Reply via email to