Module: xenomai-forge
Branch: next
Commit: 0dc7e0303749c694e70e8bbbe1e989296ca5b386
URL:    http://git.xenomai.org/?p=xenomai-forge.git;a=commit;h=0dc7e0303749c694e70e8bbbe1e989296ca5b386

Author: Philippe Gerum <r...@xenomai.org>
Date:   Fri Jun 28 20:18:51 2013 +0200

cobalt/kernel: drop __setbits()

Unlike __set_bit(), this non-atomic form cannot operate on an
arbitrary bit number; it merely ORs in a plain bitmask. There is no
point in obfuscating the code via this indirection. Drop it.
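
For illustration only (this sketch is not part of the patch; the mask
value and the demo variables are made up), the difference between the
dropped helper and the Linux bit-number API looks like this:

    /* Illustration only -- not part of this patch. */
    #include <linux/bitops.h>

    #define XNRESCHED  0x10000000UL   /* illustrative mask value */
    #define __setbits(flags, mask)  do { (flags) |= (mask); } while (0)

    static unsigned long status;
    static unsigned long bitmap[2];

    static void demo(void)
    {
            /* The dropped helper expects a ready-made mask ... */
            __setbits(status, XNRESCHED);   /* i.e. status |= XNRESCHED; */

            /*
             * ... whereas __set_bit() takes a bit number and can
             * address any bit of a multi-word bitmap.
             */
            __set_bit(37, bitmap);
    }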

---

 include/cobalt/kernel/sched.h  |   10 +++++-----
 include/cobalt/kernel/thread.h |   23 +++++++++++++++++++----
 include/cobalt/kernel/types.h  |    1 -
 kernel/cobalt/intr.c           |   10 +++++-----
 kernel/cobalt/map.c            |    4 ++--
 kernel/cobalt/pipe.c           |   33 ++++++++++++++++-----------------
 kernel/cobalt/pod.c            |   10 +++++-----
 kernel/cobalt/sched.c          |    4 ++--
 kernel/cobalt/synch.c          |    4 ++--
 kernel/cobalt/timer.c          |   18 +++++++++---------
 10 files changed, 65 insertions(+), 52 deletions(-)

diff --git a/include/cobalt/kernel/sched.h b/include/cobalt/kernel/sched.h
index db0431c..d4a6480 100644
--- a/include/cobalt/kernel/sched.h
+++ b/include/cobalt/kernel/sched.h
@@ -148,7 +148,7 @@ static inline int xnsched_resched_p(struct xnsched *sched)
 /* Set self resched flag for the given scheduler. */
 #define xnsched_set_self_resched(__sched__) do {               \
   XENO_BUGON(NUCLEUS, __sched__ != xnpod_current_sched());     \
-  __setbits((__sched__)->status, XNRESCHED);                   \
+  (__sched__)->status |= XNRESCHED;                            \
 } while (0)
 
 /* Set resched flag for the given scheduler. */
@@ -156,11 +156,11 @@ static inline int xnsched_resched_p(struct xnsched *sched)
 #define xnsched_set_resched(__sched__) do {                            \
   xnsched_t *current_sched = xnpod_current_sched();                    \
   if (current_sched == (__sched__))                                    \
-      __setbits(current_sched->status, XNRESCHED);                     \
+      current_sched->status |= XNRESCHED;                              \
   else if (!xnsched_resched_p(__sched__)) {                            \
-      cpu_set(xnsched_cpu(__sched__), current_sched->resched); \
-      __setbits((__sched__)->status, XNRESCHED);                       \
-      __setbits(current_sched->status, XNRESCHED);                     \
+      cpu_set(xnsched_cpu(__sched__), current_sched->resched);         \
+      (__sched__)->status |= XNRESCHED;                                \
+      current_sched->status |= XNRESCHED;                              \
   }                                                                    \
 } while (0)
 #else /* !CONFIG_SMP */
diff --git a/include/cobalt/kernel/thread.h b/include/cobalt/kernel/thread.h
index 287b779..738c26b 100644
--- a/include/cobalt/kernel/thread.h
+++ b/include/cobalt/kernel/thread.h
@@ -188,16 +188,31 @@ static inline int xnthread_test_state(struct xnthread *thread, int bits)
        return thread->state & bits;
 }
 
-#define xnthread_set_state(thread,flags)   __setbits((thread)->state,flags)
-#define xnthread_clear_state(thread,flags) __clrbits((thread)->state,flags)
+static inline void xnthread_set_state(struct xnthread *thread, int bits)
+{
+       thread->state |= bits;
+}
+
+static inline void xnthread_clear_state(struct xnthread *thread, int bits)
+{
+       thread->state &= ~bits;
+}
 
 static inline int xnthread_test_info(struct xnthread *thread, int bits)
 {
        return thread->info & bits;
 }
 
-#define xnthread_set_info(thread,flags)    __setbits((thread)->info,flags)
-#define xnthread_clear_info(thread,flags)  __clrbits((thread)->info,flags)
+static inline void xnthread_set_info(struct xnthread *thread, int bits)
+{
+       thread->info |= bits;
+}
+
+static inline void xnthread_clear_info(struct xnthread *thread, int bits)
+{
+       thread->info &= ~bits;
+}
+
 #define xnthread_lock_count(thread)        ((thread)->schedlck)
 #define xnthread_init_schedparam(thread)   ((thread)->init_schedparam)
 #define xnthread_base_priority(thread)     ((thread)->bprio)
diff --git a/include/cobalt/kernel/types.h b/include/cobalt/kernel/types.h
index 5cf3f9a..9375c2f 100644
--- a/include/cobalt/kernel/types.h
+++ b/include/cobalt/kernel/types.h
@@ -24,7 +24,6 @@
 
 #define setbits(flags,mask)  xnarch_atomic_set_mask(&(flags),mask)
 #define clrbits(flags,mask)  xnarch_atomic_clear_mask(&(flags),mask)
-#define __setbits(flags,mask)  do { (flags) |= (mask); } while(0)
 #define __clrbits(flags,mask)  do { (flags) &= ~(mask); } while(0)
 
 #define XENO_INFO KERN_INFO    "[Xenomai] "
diff --git a/kernel/cobalt/intr.c b/kernel/cobalt/intr.c
index 9bc5ab1..5628f27 100644
--- a/kernel/cobalt/intr.c
+++ b/kernel/cobalt/intr.c
@@ -106,7 +106,7 @@ void xnintr_clock_handler(void)
        trace_mark(xn_nucleus, clock_tick, MARK_NOARGS);
 
        ++sched->inesting;
-       __setbits(sched->lflags, XNINIRQ);
+       sched->lflags |= XNINIRQ;
 
        xnlock_get(&nklock);
        xntimer_tick();
@@ -178,7 +178,7 @@ static void xnintr_shirq_handler(unsigned irq, void *cookie)
        trace_mark(xn_nucleus, irq_enter, "irq %u", irq);
 
        ++sched->inesting;
-       __setbits(sched->lflags, XNINIRQ);
+       sched->lflags |= XNINIRQ;
 
        xnlock_get(&shirq->lock);
        intr = shirq->handlers;
@@ -248,7 +248,7 @@ static void xnintr_edge_shirq_handler(unsigned irq, void *cookie)
        trace_mark(xn_nucleus, irq_enter, "irq %u", irq);
 
        ++sched->inesting;
-       __setbits(sched->lflags, XNINIRQ);
+       sched->lflags |= XNINIRQ;
 
        xnlock_get(&shirq->lock);
        intr = shirq->handlers;
@@ -445,7 +445,7 @@ static void xnintr_irq_handler(unsigned irq, void *cookie)
        trace_mark(xn_nucleus, irq_enter, "irq %u", irq);
 
        ++sched->inesting;
-       __setbits(sched->lflags, XNINIRQ);
+       sched->lflags |= XNINIRQ;
 
        xnlock_get(&xnirqs[irq].lock);
 
@@ -732,7 +732,7 @@ int xnintr_attach(xnintr_t *intr, void *cookie)
        if (ret)
                goto out;
 
-       __setbits(intr->flags, XN_ISR_ATTACHED);
+       intr->flags |= XN_ISR_ATTACHED;
        xnintr_stat_counter_inc();
 out:
        xnlock_put_irqrestore(&intrlock, s);
diff --git a/kernel/cobalt/map.c b/kernel/cobalt/map.c
index 0a075be..443da87 100644
--- a/kernel/cobalt/map.c
+++ b/kernel/cobalt/map.c
@@ -259,8 +259,8 @@ int xnmap_remove(xnmap_t *map, int key)
        lo = ofkey % BITS_PER_LONG;
        xnlock_get_irqsave(&nklock, s);
        map->objarray[ofkey] = NULL;
-       __setbits(map->himap, 1UL << hi);
-       __setbits(map->lomap[hi], 1UL << lo);
+       map->himap |= (1UL << hi);
+       map->lomap[hi] |= (1UL << lo);
        --map->ukeys;
        xnlock_put_irqrestore(&nklock, s);
 
diff --git a/kernel/cobalt/pipe.c b/kernel/cobalt/pipe.c
index 09489c5..2bba2ee 100644
--- a/kernel/cobalt/pipe.c
+++ b/kernel/cobalt/pipe.c
@@ -69,8 +69,8 @@ static inline int xnpipe_minor_alloc(int minor)
             (1UL << (minor % BITS_PER_LONG))))
                minor = -EBUSY;
        else
-               __setbits(xnpipe_bitmap[minor / BITS_PER_LONG],
-                         1UL << (minor % BITS_PER_LONG));
+               xnpipe_bitmap[minor / BITS_PER_LONG] |=
+                       (1UL << (minor % BITS_PER_LONG));
 
        xnlock_put_irqrestore(&nklock, s);
 
@@ -88,7 +88,7 @@ static inline void xnpipe_enqueue_wait(struct xnpipe_state *state, int mask)
        if (state->wcount != 0x7fffffff && state->wcount++ == 0)
                list_add_tail(&state->slink, &xnpipe_sleepq);
 
-       __setbits(state->status, mask);
+       state->status |= mask;
 }
 
 static inline void xnpipe_dequeue_wait(struct xnpipe_state *state, int mask)
@@ -330,7 +330,7 @@ int xnpipe_connect(int minor, struct xnpipe_operations *ops, void *xstate)
                return ret;
        }
 
-       __setbits(state->status, XNPIPE_KERN_CONN);
+       state->status |= XNPIPE_KERN_CONN;
        xnsynch_init(&state->synchbase, XNSYNCH_FIFO, NULL);
        state->xstate = xstate;
        state->ionrd = 0;
@@ -341,12 +341,12 @@ int xnpipe_connect(int minor, struct xnpipe_operations *ops, void *xstate)
                         * Wake up the regular Linux task waiting for
                         * the kernel side to connect (xnpipe_open).
                         */
-                       __setbits(state->status, XNPIPE_USER_WREAD_READY);
+                       state->status |= XNPIPE_USER_WREAD_READY;
                        need_sched = 1;
                }
 
                if (state->asyncq) {    /* Schedule asynch sig. */
-                       __setbits(state->status, XNPIPE_USER_SIGIO);
+                       state->status |= XNPIPE_USER_SIGIO;
                        need_sched = 1;
                }
        }
@@ -396,12 +396,12 @@ int xnpipe_disconnect(int minor)
                 * operation from the Xenomai side (read/write or
                 * poll).
                 */
-               __setbits(state->status, XNPIPE_USER_WREAD_READY);
+               state->status |= XNPIPE_USER_WREAD_READY;
                need_sched = 1;
        }
 
        if (state->asyncq) {    /* Schedule asynch sig. */
-               __setbits(state->status, XNPIPE_USER_SIGIO);
+               state->status |= XNPIPE_USER_SIGIO;
                need_sched = 1;
        }
 
@@ -412,7 +412,7 @@ cleanup:
         * out until then.
         */
        if (state->status & XNPIPE_USER_CONN)
-               __setbits(state->status, XNPIPE_KERN_LCLOSE);
+               state->status |= XNPIPE_KERN_LCLOSE;
        else {
                xnlock_put_irqrestore(&nklock, s);
                state->ops.release(state->xstate);
@@ -471,12 +471,12 @@ ssize_t xnpipe_send(int minor, struct xnpipe_mh *mh, size_t size, int flags)
                 * Wake up the regular Linux task waiting for input
                 * from the Xenomai side.
                 */
-               __setbits(state->status, XNPIPE_USER_WREAD_READY);
+               state->status |= XNPIPE_USER_WREAD_READY;
                need_sched = 1;
        }
 
        if (state->asyncq) {    /* Schedule asynch sig. */
-               __setbits(state->status, XNPIPE_USER_SIGIO);
+               state->status |= XNPIPE_USER_SIGIO;
                need_sched = 1;
        }
 
@@ -582,7 +582,7 @@ ssize_t xnpipe_recv(int minor, struct xnpipe_mh **pmh, xnticks_t timeout)
        ret = (ssize_t)xnpipe_m_size(mh);
 
        if (state->status & XNPIPE_USER_WSYNC) {
-               __setbits(state->status, XNPIPE_USER_WSYNC_READY);
+               state->status |= XNPIPE_USER_WSYNC_READY;
                xnpipe_schedule_request();
        }
 
@@ -622,7 +622,7 @@ int xnpipe_flush(int minor, int mode)
 
        if ((state->status & XNPIPE_USER_WSYNC) &&
            msgcount > state->nroutq + state->nrinq) {
-               __setbits(state->status, XNPIPE_USER_WSYNC_READY);
+               state->status |= XNPIPE_USER_WSYNC_READY;
                xnpipe_schedule_request();
        }
 
@@ -672,8 +672,7 @@ static int xnpipe_open(struct inode *inode, struct file *file)
                return -EBUSY;
        }
 
-       __setbits(state->status, XNPIPE_USER_CONN);
-
+       state->status |= XNPIPE_USER_CONN;
        file->private_data = state;
        init_waitqueue_head(&state->readq);
        init_waitqueue_head(&state->syncq);
@@ -845,7 +844,7 @@ static ssize_t xnpipe_read(struct file *file,
                state->ops.free_obuf(mh, state->xstate);
                xnlock_get_irqsave(&nklock, s);
                if (state->status & XNPIPE_USER_WSYNC) {
-                       __setbits(state->status, XNPIPE_USER_WSYNC_READY);
+                       state->status |= XNPIPE_USER_WSYNC_READY;
                        xnpipe_schedule_request();
                }
        }
@@ -975,7 +974,7 @@ static long xnpipe_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
        kick_wsync:
 
                if (n > 0 && (state->status & XNPIPE_USER_WSYNC)) {
-                       __setbits(state->status, XNPIPE_USER_WSYNC_READY);
+                       state->status |= XNPIPE_USER_WSYNC_READY;
                        xnpipe_schedule_request();
                }
 
diff --git a/kernel/cobalt/pod.c b/kernel/cobalt/pod.c
index 4a36a87..fc58a8d 100644
--- a/kernel/cobalt/pod.c
+++ b/kernel/cobalt/pod.c
@@ -168,7 +168,7 @@ void xnpod_fatal(const char *format, ...)
        if (!xnpod_active_p() || xnpod_fatal_p())
                goto out;
 
-       __setbits(nkpod->status, XNFATAL);
+       nkpod->status |= XNFATAL;
        now = xnclock_read_monotonic();
 
        printk(KERN_ERR "\n %-3s  %-6s %-8s %-8s %-8s  %s\n",
@@ -291,7 +291,7 @@ int xnpod_init(void)
 
        xnregistry_init();
 
-       __setbits(pod->status, XNPEXEC);
+       pod->status |= XNPEXEC;
        smp_wmb();
        xnshadow_grab_events();
 
@@ -1626,7 +1626,7 @@ static inline void xnpod_switch_to(xnsched_t *sched,
 {
 #ifdef CONFIG_XENO_HW_UNLOCKED_SWITCH
        sched->last = prev;
-       __setbits(sched->status, XNINSW);
+       sched->status |= XNINSW;
        xnlock_clear_irqon(&nklock);
 #endif /* !CONFIG_XENO_HW_UNLOCKED_SWITCH */
 
@@ -1855,7 +1855,7 @@ signal_unlock_and_exit:
                goto reschedule;
 
        if (xnthread_lock_count(curr))
-               __setbits(sched->lflags, XNINLOCK);
+               sched->lflags |= XNINLOCK;
 
        xnlock_put_irqrestore(&nklock, s);
 
@@ -1892,7 +1892,7 @@ void ___xnpod_lock_sched(xnsched_t *sched)
        struct xnthread *curr = sched->curr;
 
        if (xnthread_lock_count(curr)++ == 0) {
-               __setbits(sched->lflags, XNINLOCK);
+               sched->lflags |= XNINLOCK;
                xnthread_set_state(curr, XNLOCK);
        }
 }
diff --git a/kernel/cobalt/sched.c b/kernel/cobalt/sched.c
index 00be080..29f0ce4 100644
--- a/kernel/cobalt/sched.c
+++ b/kernel/cobalt/sched.c
@@ -512,8 +512,8 @@ static struct list_head *addmlq(struct xnsched_mlq *q, int prio)
        if (list_empty(head)) {
                hi = idx / BITS_PER_LONG;
                lo = idx % BITS_PER_LONG;
-               __setbits(q->himap, 1UL << hi);
-               __setbits(q->lomap[hi], 1UL << lo);
+               q->himap |= (1UL << hi);
+               q->lomap[hi] |= (1UL << lo);
        }
 
        return head;
diff --git a/kernel/cobalt/synch.c b/kernel/cobalt/synch.c
index f958bff..cd9b516 100644
--- a/kernel/cobalt/synch.c
+++ b/kernel/cobalt/synch.c
@@ -478,7 +478,7 @@ redo:
                        if (synch->status & XNSYNCH_CLAIMED)
                                list_del(&synch->link);
                        else
-                               __setbits(synch->status, XNSYNCH_CLAIMED);
+                               synch->status |= XNSYNCH_CLAIMED;
 
                        synch->wprio = thread->wprio;
                        list_add_priff(synch, &owner->claimq, wprio, link);
@@ -662,7 +662,7 @@ void xnsynch_requeue_sleeper(struct xnthread *thread)
                 * The resource was NOT claimed, claim it now and
                 * boost the owner.
                 */
-               __setbits(synch->status, XNSYNCH_CLAIMED);
+               synch->status |= XNSYNCH_CLAIMED;
                list_add_priff(synch, &owner->claimq, wprio, link);
                if (!xnthread_test_state(owner, XNBOOST)) {
                        owner->bprio = owner->cprio;
diff --git a/kernel/cobalt/timer.c b/kernel/cobalt/timer.c
index 95c9c07..3987041 100644
--- a/kernel/cobalt/timer.c
+++ b/kernel/cobalt/timer.c
@@ -57,7 +57,7 @@ static inline void xntimer_enqueue(xntimer_t *timer)
 static inline void xntimer_dequeue(xntimer_t *timer)
 {
        xntimerq_remove(&timer->sched->timerqueue, &timer->aplink);
-       __setbits(timer->status, XNTIMER_DEQUEUED);
+       timer->status |= XNTIMER_DEQUEUED;
 }
 
 void xntimer_next_local_shot(xnsched_t *sched)
@@ -106,7 +106,7 @@ void xntimer_next_local_shot(xnsched_t *sched)
                    !xnthread_test_state(sched->curr, XNROOT)) {
                        h = xntimerq_it_next(&sched->timerqueue, &it, h);
                        if (h) {
-                               __setbits(sched->lflags, XNHDEFER);
+                               sched->lflags |= XNHDEFER;
                                timer = aplink2timer(h);
                        }
                }
@@ -299,7 +299,7 @@ int xntimer_start(xntimer_t *timer,
                date = xnarch_ns_to_tsc(value) + now;
                break;
        case XN_REALTIME:
-               __setbits(timer->status, XNTIMER_REALTIME);
+               timer->status |= XNTIMER_REALTIME;
                value -= xnclock_get_offset();
                /* fall through */
        default: /* XN_ABSOLUTE || XN_REALTIME */
@@ -315,7 +315,7 @@ int xntimer_start(xntimer_t *timer,
        if (interval != XN_INFINITE) {
                timer->interval = xnarch_ns_to_tsc(interval);
                timer->pexpect = date;
-               __setbits(timer->status, XNTIMER_PERIODIC);
+               timer->status |= XNTIMER_PERIODIC;
        }
 
        xntimer_enqueue(timer);
@@ -496,7 +496,7 @@ void xntimer_tick(void)
         * invoked timer handlers can wait until we leave the tick
         * handler. Use this status flag as hint to xntimer_start().
         */
-       __setbits(sched->status, XNINTCK);
+       sched->status |= XNINTCK;
 
        now = xnclock_read_raw();
        while ((holder = xntimerq_head(timerq)) != NULL) {
@@ -529,7 +529,7 @@ void xntimer_tick(void)
                                 */
                                if (!xntimer_reload_p(timer))
                                        continue;
-                               __setbits(timer->status, XNTIMER_FIRED);
+                               timer->status |= XNTIMER_FIRED;
                        } else if (likely((timer->status & XNTIMER_PERIODIC) == 0)) {
                                /*
                                 * Make the blocked timer elapse again
@@ -553,7 +553,7 @@ void xntimer_tick(void)
                         * save some I-cache, which translates into
                         * precious microsecs on low-end hw.
                         */
-                       __setbits(sched->lflags, XNHTICK);
+                       sched->lflags |= XNHTICK;
                        __clrbits(sched->lflags, XNHDEFER);
                        if ((timer->status & XNTIMER_PERIODIC) == 0)
                                continue;
@@ -664,7 +664,7 @@ void xntimer_destroy(xntimer_t *timer)
 
        xnlock_get_irqsave(&nklock, s);
        xntimer_stop(timer);
-       __setbits(timer->status, XNTIMER_KILLED);
+       timer->status |= XNTIMER_KILLED;
        timer->sched = NULL;
 #ifdef CONFIG_XENO_OPT_STATS
        list_del(&timer->tblink);
@@ -800,7 +800,7 @@ void xntimer_freeze(void)
        for_each_online_cpu(cpu) {
                timerq = &xnpod_sched_slot(cpu)->timerqueue;
                while ((holder = xntimerq_head(timerq)) != NULL) {
-                       __setbits(aplink2timer(holder)->status, XNTIMER_DEQUEUED);
+                       aplink2timer(holder)->status |= XNTIMER_DEQUEUED;
                        xntimerq_remove(timerq, holder);
                }
        }


_______________________________________________
Xenomai-git mailing list
Xenomai-git@xenomai.org
http://www.xenomai.org/mailman/listinfo/xenomai-git
